From 9ffb397c65797792b97ecf52e3546121f254154a Mon Sep 17 00:00:00 2001 From: Gary Mawdsley <79173+gmawdo@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:11:55 +0000 Subject: [PATCH] remove obsolete files --- entrypoint.sh | 58 - local_ci.sh | 41 - tests/advance.sh | 0 tests/basic.sh | 0 tests/configure/7000/redis.conf | 8 - tests/configure/7001/redis.conf | 8 - tests/configure/7002/redis.conf | 8 - tests/configure/master/Dockerfile | 72 - tests/configure/master/docker-entrypoint.sh | 16 - tests/configure/master/redis-trib.rb | 1710 ------------------- tests/configure/master/redis.conf | 1053 ------------ tests/docker-compose.yml | 78 - tests/intermediate.sh | 0 tests/setup.sh | 7 - 14 files changed, 3059 deletions(-) delete mode 100644 entrypoint.sh delete mode 100644 local_ci.sh delete mode 100644 tests/advance.sh delete mode 100644 tests/basic.sh delete mode 100644 tests/configure/7000/redis.conf delete mode 100644 tests/configure/7001/redis.conf delete mode 100644 tests/configure/7002/redis.conf delete mode 100644 tests/configure/master/Dockerfile delete mode 100755 tests/configure/master/docker-entrypoint.sh delete mode 100755 tests/configure/master/redis-trib.rb delete mode 100644 tests/configure/master/redis.conf delete mode 100644 tests/docker-compose.yml delete mode 100644 tests/intermediate.sh delete mode 100755 tests/setup.sh diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100644 index 3c61e76..0000000 --- a/entrypoint.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -# Start Redis servers -#redis-server /app/redis/redis-6380.conf & -#redis-server /app/redis/redis-6381.conf & -#redis-server /app/redis/redis-6382.conf & - -# Wait for Redis servers to start -#sleep 5 - -# Create the Redis cluster -#yes | redis-cli --cluster create 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6382 --cluster-replicas 0 --cluster-yes -#yes | redis-cli --cluster create 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6382 --cluster-replicas 0 -a 0rangerY --cluster-yes - - -# Edit the /etc/exports file to configure the NFS exports and add an entry to allow access from the client IP or all IPs -echo "/mnt/nfs *(rw,sync,no_subtree_check)" >> /etc/exports - - -# Start the NFS server -/usr/local/bin/graymamba /mnt/nfs & - -# Wait for the NFS server to start -sleep 5 - - -# Mount the NFS filesystem -mount -t nfs -o nolocks,tcp,rsize=131072,actimeo=120,port=2049,mountport=2049 localhost:/ ../mount_point -# mount -t nfs -o tcp,rsize=131072,port=2049,mountport=2049 localhost:/ /mount_point - -# Check if the mount was successful -if mountpoint -q /mount_point; then - echo "NFS filesystem mounted successfully." - - # Perform basic functionality tests - echo "Running basic functionality tests..." - - # Test 1: Create a directory - mkdir -p /mount_point/test_dir && echo "Directory creation test: PASSED" || echo "Directory creation test: FAILED" - - # Test 2: Create a file - echo "Hello, NFS!" > /mount_point/test_dir/test_file && echo "File creation test: PASSED" || echo "File creation test: FAILED" - - # Test 3: Read the file - cat /mount_point/test_dir/test_file && echo "File read test: PASSED" || echo "File read test: FAILED" - - # Test 4: Delete the file - rm /mount_point/test_dir/test_file && echo "File deletion test: PASSED" || echo "File deletion test: FAILED" - - # Test 5: Delete the directory - rmdir /mount_point/test_dir && echo "Directory deletion test: PASSED" || echo "Directory deletion test: FAILED" - - else - echo "Failed to mount NFS filesystem." 
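The deleted entrypoint.sh above exports /mnt/nfs, starts the graymamba NFS server, mounts the share back over NFS and runs five smoke tests (note it mounts to ../mount_point but then checks /mount_point). A minimal sketch of that mount-and-verify step, using the paths and mount options from the script and /mount_point consistently; it aborts on the first broken step instead of only echoing FAILED:

#!/bin/sh
# Export the share and start the NFS server (paths as in the deleted entrypoint.sh)
echo "/mnt/nfs *(rw,sync,no_subtree_check)" >> /etc/exports
/usr/local/bin/graymamba /mnt/nfs &
sleep 5

mkdir -p /mount_point
mount -t nfs -o nolocks,tcp,rsize=131072,actimeo=120,port=2049,mountport=2049 localhost:/ /mount_point
mountpoint -q /mount_point || { echo "Failed to mount NFS filesystem." >&2; exit 1; }

# Smoke tests: any failure exits non-zero, which fails the container
set -e
mkdir -p /mount_point/test_dir
echo 'Hello, NFS!' > /mount_point/test_dir/test_file
grep -q 'Hello, NFS!' /mount_point/test_dir/test_file
rm /mount_point/test_dir/test_file
rmdir /mount_point/test_dir
echo "All smoke tests passed."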
- fi - -# Keep the container running -tail -f /dev/null diff --git a/local_ci.sh b/local_ci.sh deleted file mode 100644 index 17b0644..0000000 --- a/local_ci.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# Define variables -IMAGE_NAME="datasignals/sptfs" -TAG="local-$(date +'%Y%m%d-%H%M%S')" -FULL_IMAGE_NAME="${IMAGE_NAME}:${TAG}" - -# Step 1: Build Docker image -echo "Building Docker image..." -docker build -t $FULL_IMAGE_NAME . - -if [ $? -ne 0 ]; then - echo "Docker build failed." - exit 1 -fi - -# Step 2: Setup environment -echo "Setting up environment..." -bash ./tests/setup.sh - -if [ $? -ne 0 ]; then - echo "Environment setup failed." - exit 1 -fi - -# Step 3: Run tests -echo "Running tests..." -docker run -v "$(pwd)/tests:/mount_point/tests" $FULL_IMAGE_NAME /bin/bash -c "/mount_point/tests/basic.sh" && -docker run -v "$(pwd)/tests:/mount_point/tests" $FULL_IMAGE -_NAME /bin/bash -c "/mount_point/tests/intermediate.sh" && -docker run -v "$(pwd)/tests:/mount_point/tests" $FULL_IMAGE_NAME /bin/bash -c "/mount_point/tests/advance.sh" -if [ $? -eq 0 ]; then - echo "Tests passed. Pushing Docker image..." - # Step 4: Push Docker image - docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" - docker push $FULL_IMAGE_NAME - echo "Docker image pushed: $FULL_IMAGE_NAME" -else - echo "Tests failed. Not pushing Docker image." - exit 1 -fi \ No newline at end of file diff --git a/tests/advance.sh b/tests/advance.sh deleted file mode 100644 index e69de29..0000000 diff --git a/tests/basic.sh b/tests/basic.sh deleted file mode 100644 index e69de29..0000000 diff --git a/tests/configure/7000/redis.conf b/tests/configure/7000/redis.conf deleted file mode 100644 index 38fd174..0000000 --- a/tests/configure/7000/redis.conf +++ /dev/null @@ -1,8 +0,0 @@ -port 8000 -cluster-enabled yes -cluster-config-file nodes.conf -cluster-node-timeout 5000 -appendonly yes -requirepass "myredis" -masterauth "myredis" -logfile "redis.log" \ No newline at end of file diff --git a/tests/configure/7001/redis.conf b/tests/configure/7001/redis.conf deleted file mode 100644 index 1fee573..0000000 --- a/tests/configure/7001/redis.conf +++ /dev/null @@ -1,8 +0,0 @@ -port 8001 -cluster-enabled yes -cluster-config-file nodes.conf -cluster-node-timeout 5000 -appendonly yes -requirepass "myredis" -masterauth "myredis" -logfile "redis.log" diff --git a/tests/configure/7002/redis.conf b/tests/configure/7002/redis.conf deleted file mode 100644 index f2cc002..0000000 --- a/tests/configure/7002/redis.conf +++ /dev/null @@ -1,8 +0,0 @@ -port 8002 -cluster-enabled yes -cluster-config-file nodes.conf -cluster-node-timeout 5000 -appendonly yes -requirepass "myredis" -masterauth "myredis" -logfile "redis.log" diff --git a/tests/configure/master/Dockerfile b/tests/configure/master/Dockerfile deleted file mode 100644 index 45b1a0b..0000000 --- a/tests/configure/master/Dockerfile +++ /dev/null @@ -1,72 +0,0 @@ -FROM alpine:3.7 - -# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added -RUN addgroup -S redis && adduser -S -G redis redis - -RUN apk update && apk upgrade -RUN apk add ruby - -# grab su-exec for easy step-down from root -RUN apk add --no-cache 'su-exec>=0.2' - -ENV REDIS_VERSION 4.0.9 -ENV REDIS_DOWNLOAD_URL http://download.redis.io/releases/redis-4.0.9.tar.gz -ENV REDIS_DOWNLOAD_SHA df4f73bc318e2f9ffb2d169a922dec57ec7c73dd07bccf875695dbeecd5ec510 - -# for redis-sentinel see: http://redis.io/topics/sentinel -# RUN apk update && apk upgrade && apk --update 
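The three deleted redis.conf files under tests/configure/7000-7002 describe cluster-enabled nodes (listening on 8000-8002 despite the directory names) sharing the password "myredis". A sketch of bringing them up locally and forming a three-master cluster; the --cluster form assumes redis-cli 5.0 or newer, whereas the image built by the Dockerfile that follows ships Redis 4.0.9 and would use the bundled redis-trib.rb instead:

# Start each node from its own directory so the per-node nodes.conf files do not collide
for d in 7000 7001 7002; do
    ( cd tests/configure/$d && redis-server redis.conf ) &
done
sleep 2

# redis-cli >= 5.0: three masters, no replicas
redis-cli -a myredis --cluster create \
    127.0.0.1:8000 127.0.0.1:8001 127.0.0.1:8002 \
    --cluster-replicas 0 --cluster-yes

# Redis 4.x equivalent, using the deleted redis-trib.rb (this fork accepts --password)
# ruby redis-trib.rb create --password myredis 127.0.0.1:8000 127.0.0.1:8001 127.0.0.1:8002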
add ruby -RUN set -ex; \ - \ - apk add --no-cache --virtual .build-deps \ - coreutils \ - gcc \ - jemalloc-dev \ - linux-headers \ - make \ - musl-dev \ - ruby \ - ; \ - \ - wget -O redis.tar.gz "$REDIS_DOWNLOAD_URL"; \ - echo "$REDIS_DOWNLOAD_SHA *redis.tar.gz" | sha256sum -c -; \ - mkdir -p /usr/src/redis; \ - tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1; \ - rm redis.tar.gz; \ - \ -# disable Redis protected mode [1] as it is unnecessary in context of Docker -# (ports are not automatically exposed when running inside Docker, but rather explicitly by specifying -p / -P) -# [1]: https://github.com/antirez/redis/commit/edd4d555df57dc84265fdfb4ef59a4678832f6da - grep -q '^#define CONFIG_DEFAULT_PROTECTED_MODE 1$' /usr/src/redis/src/server.h; \ - sed -ri 's!^(#define CONFIG_DEFAULT_PROTECTED_MODE) 1$!\1 0!' /usr/src/redis/src/server.h; \ - grep -q '^#define CONFIG_DEFAULT_PROTECTED_MODE 0$' /usr/src/redis/src/server.h; \ -# for future reference, we modify this directly in the source instead of just supplying a default configuration flag because apparently "if you specify any argument to redis-server, [it assumes] you are going to specify everything" -# see also https://github.com/docker-library/redis/issues/4#issuecomment-50780840 -# (more exactly, this makes sure the default behavior of "save on SIGTERM" stays functional by default) - \ - make -C /usr/src/redis -j "$(nproc)"; \ - make -C /usr/src/redis install; \ - \ - # rm -r /usr/src/redis; \ - \ - runDeps="$( \ - scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \ - | tr ',' '\n' \ - | sort -u \ - | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ - )"; \ - apk add --virtual .redis-rundeps $runDeps; \ - apk del .build-deps; \ - \ - redis-server --version - -RUN mkdir /data && chown redis:redis /data -RUN gem install redis || true -VOLUME /data -WORKDIR /data - -COPY docker-entrypoint.sh /usr/local/bin/ -COPY redis-trib.rb /data/redis-trib.rb -ENTRYPOINT ["docker-entrypoint.sh"] - -# EXPOSE 6379 -CMD ["redis-server"] \ No newline at end of file diff --git a/tests/configure/master/docker-entrypoint.sh b/tests/configure/master/docker-entrypoint.sh deleted file mode 100755 index 0bb239f..0000000 --- a/tests/configure/master/docker-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -set -e - -# first arg is `-f` or `--some-option` -# or first arg is `something.conf` -if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then - set -- redis-server "$@" -fi - -# allow the container to be started with `--user` -if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then - chown -R redis . - exec su-exec redis "$0" "$@" -fi - -exec "$@" \ No newline at end of file diff --git a/tests/configure/master/redis-trib.rb b/tests/configure/master/redis-trib.rb deleted file mode 100755 index 4a8b765..0000000 --- a/tests/configure/master/redis-trib.rb +++ /dev/null @@ -1,1710 +0,0 @@ -#!/usr/bin/env ruby - -# TODO (temporary here, we'll move this into the Github issues once -# redis-trib initial implementation is completed). -# -# - Make sure that if the rehashing fails in the middle redis-trib will try -# to recover. -# - When redis-trib performs a cluster check, if it detects a slot move in -# progress it should prompt the user to continue the move from where it -# stopped. -# - Gracefully handle Ctrl+C in move_slot to prompt the user if really stop -# while rehashing, and performing the best cleanup possible if the user -# forces the quit. 
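The deleted docker-entrypoint.sh above decides whether the container's first argument is meant for redis-server using two POSIX parameter expansions. A small sketch of that dispatch in isolation; the echo lines only illustrate what would be executed:

#!/bin/sh
# "${1#-}"  != "$1"   -> true when $1 starts with "-" (an option such as --appendonly)
# "${1%.conf}" != "$1" -> true when $1 ends in ".conf" (a configuration file path)
dispatch() {
    if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
        echo "would run: redis-server $*"
    else
        echo "would exec: $*"
    fi
}

dispatch --appendonly yes     # option      -> redis-server --appendonly yes
dispatch /data/redis.conf     # config file -> redis-server /data/redis.conf
dispatch redis-cli ping       # other       -> executed as given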
-# - When doing "fix" set a global Fix to true, and prompt the user to -# fix the problem if automatically fixable every time there is something -# to fix. For instance: -# 1) If there is a node that pretend to receive a slot, or to migrate a -# slot, but has no entries in that slot, fix it. -# 2) If there is a node having keys in slots that are not owned by it -# fix this condition moving the entries in the same node. -# 3) Perform more possibly slow tests about the state of the cluster. -# 4) When aborted slot migration is detected, fix it. - -require 'rubygems' -require 'redis' - -ClusterHashSlots = 16384 -MigrateDefaultTimeout = 60000 -MigrateDefaultPipeline = 10 -RebalanceDefaultThreshold = 2 - -$verbose = false - -def xputs(s) - case s[0..2] - when ">>>" - color="29;1" - when "[ER" - color="31;1" - when "[WA" - color="31;1" - when "[OK" - color="32" - when "[FA","***" - color="33" - else - color=nil - end - - color = nil if ENV['TERM'] != "xterm" - print "\033[#{color}m" if color - print s - print "\033[0m" if color - print "\n" -end - -class ClusterNode - def initialize(addr, password) - s = addr.split("@")[0].split(":") - if s.length < 2 - puts "Invalid IP or Port (given as #{addr}) - use IP:Port format" - exit 1 - end - port = s.pop # removes port from split array - ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address - @r = nil - @info = {} - @info[:host] = ip - @info[:port] = port - @info[:slots] = {} - @info[:migrating] = {} - @info[:importing] = {} - @info[:replicate] = false - @info[:password] = password - @dirty = false # True if we need to flush slots info into node. - @friends = [] - end - - def friends - @friends - end - - def slots - @info[:slots] - end - - def has_flag?(flag) - @info[:flags].index(flag) - end - - def to_s - "#{@info[:host]}:#{@info[:port]}" - end - - def connect(o={}) - return if @r - print "Connecting to node #{self}: " if $verbose - STDOUT.flush - begin - @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60) - - if @info[:password] - @r.auth(@info[:password]) - end - - @r.ping - rescue - xputs "[ERR] Sorry, can't connect to node #{self}" - exit 1 if o[:abort] - @r = nil - end - xputs "OK" if $verbose - end - - def assert_cluster - info = @r.info - if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0 - xputs "[ERR] Node #{self} is not configured as a cluster node." - exit 1 - end - end - - def assert_empty - if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) || - (@r.info['db0']) - xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0." 
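ClusterNode#assert_cluster and #assert_empty above insist that a node has cluster mode enabled, knows only itself, and holds no keys before it may join a new cluster. The same preconditions can be checked from the shell; a sketch assuming one of the local test nodes (port 8000, password myredis):

H=127.0.0.1; P=8000

# Cluster mode must be enabled
redis-cli -a myredis -h $H -p $P info cluster | grep -q '^cluster_enabled:1' \
    || { echo "not a cluster node" >&2; exit 1; }

# The node must know only itself and contain no keys
redis-cli -a myredis -h $H -p $P cluster info | grep -q '^cluster_known_nodes:1' \
    || { echo "node already knows other nodes" >&2; exit 1; }
[ "$(redis-cli -a myredis -h $H -p $P dbsize)" = "0" ] \
    || { echo "node is not empty" >&2; exit 1; }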
- exit 1 - end - end - - def load_info(o={}) - self.connect - nodes = @r.cluster("nodes").split("\n") - nodes.each{|n| - # name addr flags role ping_sent ping_recv link_status slots - split = n.split - name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..6] - slots = split[8..-1] - info = { - :name => name, - :addr => addr, - :flags => flags.split(","), - :replicate => master_id, - :ping_sent => ping_sent.to_i, - :ping_recv => ping_recv.to_i, - :link_status => link_status - } - info[:replicate] = false if master_id == "-" - - if info[:flags].index("myself") - @info = @info.merge(info) - @info[:slots] = {} - slots.each{|s| - if s[0..0] == '[' - if s.index("->-") # Migrating - slot,dst = s[1..-1].split("->-") - @info[:migrating][slot.to_i] = dst - elsif s.index("-<-") # Importing - slot,src = s[1..-1].split("-<-") - @info[:importing][slot.to_i] = src - end - elsif s.index("-") - start,stop = s.split("-") - self.add_slots((start.to_i)..(stop.to_i)) - else - self.add_slots((s.to_i)..(s.to_i)) - end - } if slots - @dirty = false - @r.cluster("info").split("\n").each{|e| - k,v=e.split(":") - k = k.to_sym - v.chop! - if k != :cluster_state - @info[k] = v.to_i - else - @info[k] = v - end - } - elsif o[:getfriends] - @friends << info - end - } - end - - def add_slots(slots) - slots.each{|s| - @info[:slots][s] = :new - } - @dirty = true - end - - def set_as_replica(node_id) - @info[:replicate] = node_id - @dirty = true - end - - def flush_node_config - return if !@dirty - if @info[:replicate] - begin - @r.cluster("replicate",@info[:replicate]) - rescue - # If the cluster did not already joined it is possible that - # the slave does not know the master node yet. So on errors - # we return ASAP leaving the dirty flag set, to flush the - # config later. - return - end - else - new = [] - @info[:slots].each{|s,val| - if val == :new - new << s - @info[:slots][s] = true - end - } - @r.cluster("addslots",*new) - end - @dirty = false - end - - def info_string - # We want to display the hash slots assigned to this node - # as ranges, like in: "1-5,8-9,20-25,30" - # - # Note: this could be easily written without side effects, - # we use 'slots' just to split the computation into steps. - - # First step: we want an increasing array of integers - # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30] - slots = @info[:slots].keys.sort - - # As we want to aggregate adjacent slots we convert all the - # slot integers into ranges (with just one element) - # So we have something like [1..1,2..2, ... and so forth. - slots.map!{|x| x..x} - - # Finally we group ranges with adjacent elements. - slots = slots.reduce([]) {|a,b| - if !a.empty? && b.first == (a[-1].last)+1 - a[0..-2] + [(a[-1].first)..(b.last)] - else - a + [b] - end - } - - # Now our task is easy, we just convert ranges with just one - # element into a number, and a real range into a start-end format. - # Finally we join the array using the comma as separator. - slots = slots.map{|x| - x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}" - }.join(",") - - role = self.has_flag?("master") ? 
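load_info above rebuilds each node's slot map by parsing CLUSTER NODES output, whose fields are: id, addr, flags, master-id, ping-sent, pong-recv, config-epoch, link-state, then the slot ranges and any [importing]/[migrating] markers. The same per-master view from the shell, against one of the test nodes:

redis-cli -a myredis -h 127.0.0.1 -p 8000 cluster nodes \
  | awk '$3 ~ /master/ {
        printf "%s %s:", substr($1, 1, 8), $2       # abbreviated node id and address
        for (i = 9; i <= NF; i++) printf " %s", $i  # slot ranges and open-slot markers
        print ""
    }'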
"M" : "S" - - if self.info[:replicate] and @dirty - is = "S: #{self.info[:name]} #{self.to_s}" - else - is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+ - " slots:#{slots} (#{self.slots.length} slots) "+ - "#{(self.info[:flags]-["myself"]).join(",")}" - end - if self.info[:replicate] - is += "\n replicates #{info[:replicate]}" - elsif self.has_flag?("master") && self.info[:replicas] - is += "\n #{info[:replicas].length} additional replica(s)" - end - is - end - - # Return a single string representing nodes and associated slots. - # TODO: remove slaves from config when slaves will be handled - # by Redis Cluster. - def get_config_signature - config = [] - @r.cluster("nodes").each_line{|l| - s = l.split - slots = s[8..-1].select {|x| x[0..0] != "["} - next if slots.length == 0 - config << s[0]+":"+(slots.sort.join(",")) - } - config.sort.join("|") - end - - def info - @info - end - - def is_dirty? - @dirty - end - - def r - @r - end -end - -class RedisTrib - def initialize - @nodes = [] - @fix = false - @errors = [] - @timeout = MigrateDefaultTimeout - end - - def check_arity(req_args, num_args) - if ((req_args > 0 and num_args != req_args) || - (req_args < 0 and num_args < req_args.abs)) - xputs "[ERR] Wrong number of arguments for specified sub command" - exit 1 - end - end - - def add_node(node) - @nodes << node - end - - def reset_nodes - @nodes = [] - end - - def cluster_error(msg) - @errors << msg - xputs msg - end - - # Return the node with the specified ID or Nil. - def get_node_by_name(name) - @nodes.each{|n| - return n if n.info[:name] == name.downcase - } - return nil - end - - # Like get_node_by_name but the specified name can be just the first - # part of the node ID as long as the prefix in unique across the - # cluster. - def get_node_by_abbreviated_name(name) - l = name.length - candidates = [] - @nodes.each{|n| - if n.info[:name][0...l] == name.downcase - candidates << n - end - } - return nil if candidates.length != 1 - candidates[0] - end - - # This function returns the master that has the least number of replicas - # in the cluster. If there are multiple masters with the same smaller - # number of replicas, one at random is returned. - def get_master_with_least_replicas - masters = @nodes.select{|n| n.has_flag? "master"} - sorted = masters.sort{|a,b| - a.info[:replicas].length <=> b.info[:replicas].length - } - sorted[0] - end - - def check_cluster(opt={}) - xputs ">>> Performing Cluster Check (using node #{@nodes[0]})" - show_nodes if !opt[:quiet] - check_config_consistency - check_open_slots - check_slots_coverage - end - - def show_cluster_info - masters = 0 - keys = 0 - @nodes.each{|n| - if n.has_flag?("master") - puts "#{n} (#{n.info[:name][0...8]}...) -> #{n.r.dbsize} keys | #{n.slots.length} slots | "+ - "#{n.info[:replicas].length} slaves." - masters += 1 - keys += n.r.dbsize - end - } - xputs "[OK] #{keys} keys in #{masters} masters." - keys_per_slot = sprintf("%.2f",keys/16384.0) - puts "#{keys_per_slot} keys per slot on average." - end - - # Merge slots of every known node. If the resulting slots are equal - # to ClusterHashSlots, then all slots are served. - def covered_slots - slots = {} - @nodes.each{|n| - slots = slots.merge(n.slots) - } - slots - end - - def check_slots_coverage - xputs ">>> Check slots coverage..." - slots = covered_slots - if slots.length == ClusterHashSlots - xputs "[OK] All #{ClusterHashSlots} slots covered." - else - cluster_error \ - "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes." 
- fix_slots_coverage if @fix - end - end - - def check_open_slots - xputs ">>> Check for open slots..." - open_slots = [] - @nodes.each{|n| - if n.info[:migrating].size > 0 - cluster_error \ - "[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})." - open_slots += n.info[:migrating].keys - end - if n.info[:importing].size > 0 - cluster_error \ - "[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})." - open_slots += n.info[:importing].keys - end - } - open_slots.uniq! - if open_slots.length > 0 - xputs "[WARNING] The following slots are open: #{open_slots.join(",")}" - end - if @fix - open_slots.each{|slot| fix_open_slot slot} - end - end - - def nodes_with_keys_in_slot(slot) - nodes = [] - @nodes.each{|n| - next if n.has_flag?("slave") - nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0 - } - nodes - end - - def fix_slots_coverage - not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys - xputs ">>> Fixing slots coverage..." - xputs "List of not covered slots: " + not_covered.join(",") - - # For every slot, take action depending on the actual condition: - # 1) No node has keys for this slot. - # 2) A single node has keys for this slot. - # 3) Multiple nodes have keys for this slot. - slots = {} - not_covered.each{|slot| - nodes = nodes_with_keys_in_slot(slot) - slots[slot] = nodes - xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join(", ")}" - } - - none = slots.select {|k,v| v.length == 0} - single = slots.select {|k,v| v.length == 1} - multi = slots.select {|k,v| v.length > 1} - - # Handle case "1": keys in no node. - if none.length > 0 - xputs "The folowing uncovered slots have no keys across the cluster:" - xputs none.keys.join(",") - yes_or_die "Fix these slots by covering with a random node?" - none.each{|slot,nodes| - node = @nodes.sample - xputs ">>> Covering slot #{slot} with #{node}" - node.r.cluster("addslots",slot) - } - end - - # Handle case "2": keys only in one node. - if single.length > 0 - xputs "The folowing uncovered slots have keys in just one node:" - puts single.keys.join(",") - yes_or_die "Fix these slots by covering with those nodes?" - single.each{|slot,nodes| - xputs ">>> Covering slot #{slot} with #{nodes[0]}" - nodes[0].r.cluster("addslots",slot) - } - end - - # Handle case "3": keys in multiple nodes. - if multi.length > 0 - xputs "The folowing uncovered slots have keys in multiple nodes:" - xputs multi.keys.join(",") - yes_or_die "Fix these slots by moving keys into a single node?" - multi.each{|slot,nodes| - target = get_node_with_most_keys_in_slot(nodes,slot) - xputs ">>> Covering slot #{slot} moving keys to #{target}" - - target.r.cluster('addslots',slot) - target.r.cluster('setslot',slot,'stable') - nodes.each{|src| - next if src == target - # Set the source node in 'importing' state (even if we will - # actually migrate keys away) in order to avoid receiving - # redirections for MIGRATE. - src.r.cluster('setslot',slot,'importing',target.info[:name]) - move_slot(src,target,slot,:dots=>true,:fix=>true,:cold=>true) - src.r.cluster('setslot',slot,'stable') - } - } - end - end - - # Return the owner of the specified slot - def get_slot_owners(slot) - owners = [] - @nodes.each{|n| - next if n.has_flag?("slave") - n.slots.each{|s,_| - owners << n if s == slot - } - } - owners - end - - # Return the node, among 'nodes' with the greatest number of keys - # in the specified slot. 
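fix_slots_coverage above distinguishes three cases for an uncovered slot depending on how many nodes still hold keys for it, and check_open_slots relies on the same per-slot key counts. Both map directly onto two cluster commands; a sketch for a single slot against the test nodes (the slot number is arbitrary):

SLOT=1234

# How many keys each node still holds for the slot (drives the case 1/2/3 choice above)
for p in 8000 8001 8002; do
    n=$(redis-cli -a myredis -p $p cluster countkeysinslot $SLOT)
    echo "port $p: $n keys in slot $SLOT"
done

# Case "no keys anywhere": simply cover the slot with one of the masters
redis-cli -a myredis -p 8000 cluster addslots $SLOT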
- def get_node_with_most_keys_in_slot(nodes,slot) - best = nil - best_numkeys = 0 - @nodes.each{|n| - next if n.has_flag?("slave") - numkeys = n.r.cluster("countkeysinslot",slot) - if numkeys > best_numkeys || best == nil - best = n - best_numkeys = numkeys - end - } - return best - end - - # Slot 'slot' was found to be in importing or migrating state in one or - # more nodes. This function fixes this condition by migrating keys where - # it seems more sensible. - def fix_open_slot(slot) - puts ">>> Fixing open slot #{slot}" - - # Try to obtain the current slot owner, according to the current - # nodes configuration. - owners = get_slot_owners(slot) - owner = owners[0] if owners.length == 1 - - migrating = [] - importing = [] - @nodes.each{|n| - next if n.has_flag? "slave" - if n.info[:migrating][slot] - migrating << n - elsif n.info[:importing][slot] - importing << n - elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner - xputs "*** Found keys about slot #{slot} in node #{n}!" - importing << n - end - } - puts "Set as migrating in: #{migrating.join(",")}" - puts "Set as importing in: #{importing.join(",")}" - - # If there is no slot owner, set as owner the slot with the biggest - # number of keys, among the set of migrating / importing nodes. - if !owner - xputs ">>> Nobody claims ownership, selecting an owner..." - owner = get_node_with_most_keys_in_slot(@nodes,slot) - - # If we still don't have an owner, we can't fix it. - if !owner - xputs "[ERR] Can't select a slot owner. Impossible to fix." - exit 1 - end - - # Use ADDSLOTS to assign the slot. - puts "*** Configuring #{owner} as the slot owner" - owner.r.cluster("setslot",slot,"stable") - owner.r.cluster("addslots",slot) - # Make sure this information will propagate. Not strictly needed - # since there is no past owner, so all the other nodes will accept - # whatever epoch this node will claim the slot with. - owner.r.cluster("bumpepoch") - - # Remove the owner from the list of migrating/importing - # nodes. - migrating.delete(owner) - importing.delete(owner) - end - - # If there are multiple owners of the slot, we need to fix it - # so that a single node is the owner and all the other nodes - # are in importing state. Later the fix can be handled by one - # of the base cases above. - # - # Note that this case also covers multiple nodes having the slot - # in migrating state, since migrating is a valid state only for - # slot owners. - if owners.length > 1 - owner = get_node_with_most_keys_in_slot(owners,slot) - owners.each{|n| - next if n == owner - n.r.cluster('delslots',slot) - n.r.cluster('setslot',slot,'importing',owner.info[:name]) - importing.delete(n) # Avoid duplciates - importing << n - } - owner.r.cluster('bumpepoch') - end - - # Case 1: The slot is in migrating state in one slot, and in - # importing state in 1 slot. That's trivial to address. - if migrating.length == 1 && importing.length == 1 - move_slot(migrating[0],importing[0],slot,:dots=>true,:fix=>true) - # Case 2: There are multiple nodes that claim the slot as importing, - # they probably got keys about the slot after a restart so opened - # the slot. In this case we just move all the keys to the owner - # according to the configuration. 
- elsif migrating.length == 0 && importing.length > 0 - xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}" - importing.each {|node| - next if node == owner - move_slot(node,owner,slot,:dots=>true,:fix=>true,:cold=>true) - xputs ">>> Setting #{slot} as STABLE in #{node}" - node.r.cluster("setslot",slot,"stable") - } - # Case 3: There are no slots claiming to be in importing state, but - # there is a migrating node that actually don't have any key. We - # can just close the slot, probably a reshard interrupted in the middle. - elsif importing.length == 0 && migrating.length == 1 && - migrating[0].r.cluster("getkeysinslot",slot,10).length == 0 - migrating[0].r.cluster("setslot",slot,"stable") - else - xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}" - end - end - - # Check if all the nodes agree about the cluster configuration - def check_config_consistency - if !is_config_consistent? - cluster_error "[ERR] Nodes don't agree about configuration!" - else - xputs "[OK] All nodes agree about slots configuration." - end - end - - def is_config_consistent? - signatures=[] - @nodes.each{|n| - signatures << n.get_config_signature - } - return signatures.uniq.length == 1 - end - - def wait_cluster_join - print "Waiting for the cluster to join" - while !is_config_consistent? - print "." - STDOUT.flush - sleep 1 - end - print "\n" - end - - def alloc_slots - nodes_count = @nodes.length - masters_count = @nodes.length / (@replicas+1) - masters = [] - - # The first step is to split instances by IP. This is useful as - # we'll try to allocate master nodes in different physical machines - # (as much as possible) and to allocate slaves of a given master in - # different physical machines as well. - # - # This code assumes just that if the IP is different, than it is more - # likely that the instance is running in a different physical host - # or at least a different virtual machine. - ips = {} - @nodes.each{|n| - ips[n.info[:host]] = [] if !ips[n.info[:host]] - ips[n.info[:host]] << n - } - - # Select master instances - puts "Using #{masters_count} masters:" - interleaved = [] - stop = false - while not stop do - # Take one node from each IP until we run out of nodes - # across every IP. - ips.each do |ip,nodes| - if nodes.empty? - # if this IP has no remaining nodes, check for termination - if interleaved.length == nodes_count - # stop when 'interleaved' has accumulated all nodes - stop = true - next - end - else - # else, move one node from this IP to 'interleaved' - interleaved.push nodes.shift - end - end - end - - masters = interleaved.slice!(0, masters_count) - nodes_count -= masters.length - - masters.each{|m| puts m} - - # Alloc slots on masters - slots_per_node = ClusterHashSlots.to_f / masters_count - first = 0 - cursor = 0.0 - masters.each_with_index{|n,masternum| - last = (cursor+slots_per_node-1).round - if last > ClusterHashSlots || masternum == masters.length-1 - last = ClusterHashSlots-1 - end - last = first if last < first # Min step is 1. - n.add_slots first..last - first = last+1 - cursor += slots_per_node - } - - # Select N replicas for every master. - # We try to split the replicas among all the IPs with spare nodes - # trying to avoid the host where the master is running, if possible. - # - # Note we loop two times. The first loop assigns the requested - # number of replicas to each master. 
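alloc_slots above walks a floating-point cursor across the 16384 hash slots so each master receives a contiguous range proportional to its share, with the last master absorbing the remainder. For the usual three-master case that yields 5461 + 5462 + 5461 slots; a quick check of the arithmetic:

awk 'BEGIN {
    slots = 16384; masters = 3; per = slots / masters; first = 0; cursor = 0
    for (m = 0; m < masters; m++) {
        last = int(cursor + per - 1 + 0.5)       # round, as alloc_slots does
        if (m == masters - 1) last = slots - 1   # last master takes whatever is left
        printf "master %d: %d-%d (%d slots)\n", m, first, last, last - first + 1
        first = last + 1; cursor += per
    }
}'
# master 0: 0-5460 (5461 slots)
# master 1: 5461-10922 (5462 slots)
# master 2: 10923-16383 (5461 slots)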
The second loop assigns any - # remaining instances as extra replicas to masters. Some masters - # may end up with more than their requested number of replicas, but - # all nodes will be used. - assignment_verbose = false - - [:requested,:unused].each do |assign| - masters.each do |m| - assigned_replicas = 0 - while assigned_replicas < @replicas - break if nodes_count == 0 - if assignment_verbose - if assign == :requested - puts "Requesting total of #{@replicas} replicas " \ - "(#{assigned_replicas} replicas assigned " \ - "so far with #{nodes_count} total remaining)." - elsif assign == :unused - puts "Assigning extra instance to replication " \ - "role too (#{nodes_count} remaining)." - end - end - - # Return the first node not matching our current master - node = interleaved.find{|n| n.info[:host] != m.info[:host]} - - # If we found a node, use it as a best-first match. - # Otherwise, we didn't find a node on a different IP, so we - # go ahead and use a same-IP replica. - if node - slave = node - interleaved.delete node - else - slave = interleaved.shift - end - slave.set_as_replica(m.info[:name]) - nodes_count -= 1 - assigned_replicas += 1 - puts "Adding replica #{slave} to #{m}" - - # If we are in the "assign extra nodes" loop, - # we want to assign one extra replica to each - # master before repeating masters. - # This break lets us assign extra replicas to masters - # in a round-robin way. - break if assign == :unused - end - end - end - end - - def flush_nodes_config - @nodes.each{|n| - n.flush_node_config - } - end - - def show_nodes - @nodes.each{|n| - xputs n.info_string - } - end - - # Redis Cluster config epoch collision resolution code is able to eventually - # set a different epoch to each node after a new cluster is created, but - # it is slow compared to assign a progressive config epoch to each node - # before joining the cluster. However we do just a best-effort try here - # since if we fail is not a problem. - def assign_config_epoch - config_epoch = 1 - @nodes.each{|n| - begin - n.r.cluster("set-config-epoch",config_epoch) - rescue - end - config_epoch += 1 - } - end - - def join_cluster - # We use a brute force approach to make sure the node will meet - # each other, that is, sending CLUSTER MEET messages to all the nodes - # about the very same node. - # Thanks to gossip this information should propagate across all the - # cluster in a matter of seconds. - first = false - @nodes.each{|n| - if !first then first = n.info; next; end # Skip the first node - n.r.cluster("meet",first[:host],first[:port]) - } - end - - def yes_or_die(msg) - print "#{msg} (type 'yes' to accept): " - STDOUT.flush - if !(STDIN.gets.chomp.downcase == "yes") - xputs "*** Aborting..." - exit 1 - end - end - - def load_cluster_info_from_node(nodeaddr, password) - node = ClusterNode.new(nodeaddr, password) - node.connect(:abort => true) - node.assert_cluster - node.load_info(:getfriends => true) - add_node(node) - node.friends.each{|f| - next if f[:flags].index("noaddr") || - f[:flags].index("disconnected") || - f[:flags].index("fail") - fnode = ClusterNode.new(f[:addr], password) - fnode.connect() - next if !fnode.r - begin - fnode.load_info() - add_node(fnode) - rescue => e - xputs "[ERR] Unable to load info for node #{fnode}" - end - } - populate_nodes_replicas_info - end - - # This function is called by load_cluster_info_from_node in order to - # add additional information to every node as a list of replicas. - def populate_nodes_replicas_info - # Start adding the new field to every node. 
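join_cluster and assign_config_epoch above bootstrap the cluster: every node is told to MEET the first one, after each node has been given a distinct config epoch so that collision resolution has less work to do. The same bootstrap by hand for the three test nodes:

epoch=1
for p in 8000 8001 8002; do
    # Best-effort, exactly as in assign_config_epoch: only fresh, empty nodes accept this
    redis-cli -a myredis -p $p cluster set-config-epoch $epoch
    epoch=$((epoch + 1))
done
for p in 8001 8002; do
    redis-cli -a myredis -p $p cluster meet 127.0.0.1 8000
done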
- @nodes.each{|n| - n.info[:replicas] = [] - } - - # Populate the replicas field using the replicate field of slave - # nodes. - @nodes.each{|n| - if n.info[:replicate] - master = get_node_by_name(n.info[:replicate]) - if !master - xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}." - else - master.info[:replicas] << n - end - end - } - end - - # Given a list of source nodes return a "resharding plan" - # with what slots to move in order to move "numslots" slots to another - # instance. - def compute_reshard_table(sources,numslots) - moved = [] - # Sort from bigger to smaller instance, for two reasons: - # 1) If we take less slots than instances it is better to start - # getting from the biggest instances. - # 2) We take one slot more from the first instance in the case of not - # perfect divisibility. Like we have 3 nodes and need to get 10 - # slots, we take 4 from the first, and 3 from the rest. So the - # biggest is always the first. - sources = sources.sort{|a,b| b.slots.length <=> a.slots.length} - source_tot_slots = sources.inject(0) {|sum,source| - sum+source.slots.length - } - sources.each_with_index{|s,i| - # Every node will provide a number of slots proportional to the - # slots it has assigned. - n = (numslots.to_f/source_tot_slots*s.slots.length) - if i == 0 - n = n.ceil - else - n = n.floor - end - s.slots.keys.sort[(0...n)].each{|slot| - if moved.length < numslots - moved << {:source => s, :slot => slot} - end - } - } - return moved - end - - def show_reshard_table(table) - table.each{|e| - puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}" - } - end - - # Move slots between source and target nodes using MIGRATE. - # - # Options: - # :verbose -- Print a dot for every moved key. - # :fix -- We are moving in the context of a fix. Use REPLACE. - # :cold -- Move keys without opening slots / reconfiguring the nodes. - # :update -- Update nodes.info[:slots] for source/target nodes. - # :quiet -- Don't print info messages. - def move_slot(source,target,slot,o={}) - o = {:pipeline => MigrateDefaultPipeline}.merge(o) - - # We start marking the slot as importing in the destination node, - # and the slot as migrating in the target host. Note that the order of - # the operations is important, as otherwise a client may be redirected - # to the target node that does not yet know it is importing this slot. - if !o[:quiet] - print "Moving slot #{slot} from #{source} to #{target}: " - STDOUT.flush - end - - if !o[:cold] - target.r.cluster("setslot",slot,"importing",source.info[:name]) - source.r.cluster("setslot",slot,"migrating",target.info[:name]) - end - # Migrate all the keys from source to target using the MIGRATE command - while true - keys = source.r.cluster("getkeysinslot",slot,o[:pipeline]) - break if keys.length == 0 - begin - source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:keys,*keys]) - rescue => e - if o[:fix] && e.to_s =~ /BUSYKEY/ - xputs "*** Target key exists. Replacing it for FIX." - source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:replace,:keys,*keys]) - else - puts "" - xputs "[ERR] Calling MIGRATE: #{e}" - exit 1 - end - end - print "."*keys.length if o[:dots] - STDOUT.flush - end - - puts if !o[:quiet] - # Set the new node as the owner of the slot in all the known nodes. 
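move_slot above is the core of resharding: mark the slot importing on the target and migrating on the source, drain the keys in batches with the multi-key form of MIGRATE, then advertise the new owner everywhere. The same sequence issued by hand with redis-cli between two of the test nodes; a sketch that assumes key names without spaces or newlines:

SLOT=666
SRC_PORT=8000; DST_PORT=8001
SRC_ID=$(redis-cli -a myredis -p $SRC_PORT cluster myid)
DST_ID=$(redis-cli -a myredis -p $DST_PORT cluster myid)

# 1. Open the slot: importing on the destination first, then migrating on the source
redis-cli -a myredis -p $DST_PORT cluster setslot $SLOT importing $SRC_ID
redis-cli -a myredis -p $SRC_PORT cluster setslot $SLOT migrating $DST_ID

# 2. Drain keys in batches of 10 (MigrateDefaultPipeline); add REPLACE on BUSYKEY errors
while :; do
    keys=$(redis-cli -a myredis -p $SRC_PORT cluster getkeysinslot $SLOT 10)
    [ -z "$keys" ] && break
    redis-cli -a myredis -p $SRC_PORT migrate 127.0.0.1 $DST_PORT "" 0 60000 AUTH myredis KEYS $keys
done

# 3. Tell every master who owns the slot now
for p in 8000 8001 8002; do
    redis-cli -a myredis -p $p cluster setslot $SLOT node $DST_ID
done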
- if !o[:cold] - @nodes.each{|n| - next if n.has_flag?("slave") - n.r.cluster("setslot",slot,"node",target.info[:name]) - } - end - - # Update the node logical config - if o[:update] then - source.info[:slots].delete(slot) - target.info[:slots][slot] = true - end - end - - # redis-trib subcommands implementations. - - def check_cluster_cmd(argv,opt) - load_cluster_info_from_node(argv[0], opt['password']) - check_cluster - end - - def info_cluster_cmd(argv,opt) - load_cluster_info_from_node(argv[0], opt['password']) - show_cluster_info - end - - def rebalance_cluster_cmd(argv,opt) - opt = { - 'pipeline' => MigrateDefaultPipeline, - 'threshold' => RebalanceDefaultThreshold - }.merge(opt) - - # Load nodes info before parsing options, otherwise we can't - # handle --weight. - load_cluster_info_from_node(argv[0], opt['password']) - - # Options parsing - threshold = opt['threshold'].to_i - autoweights = opt['auto-weights'] - weights = {} - opt['weight'].each{|w| - fields = w.split("=") - node = get_node_by_abbreviated_name(fields[0]) - if !node || !node.has_flag?("master") - puts "*** No such master node #{fields[0]}" - exit 1 - end - weights[node.info[:name]] = fields[1].to_f - } if opt['weight'] - useempty = opt['use-empty-masters'] - - # Assign a weight to each node, and compute the total cluster weight. - total_weight = 0 - nodes_involved = 0 - @nodes.each{|n| - if n.has_flag?("master") - next if !useempty && n.slots.length == 0 - n.info[:w] = weights[n.info[:name]] ? weights[n.info[:name]] : 1 - total_weight += n.info[:w] - nodes_involved += 1 - end - } - - # Check cluster, only proceed if it looks sane. - check_cluster(:quiet => true) - if @errors.length != 0 - puts "*** Please fix your cluster problems before rebalancing" - exit 1 - end - - # Calculate the slots balance for each node. It's the number of - # slots the node should lose (if positive) or gain (if negative) - # in order to be balanced. - threshold = opt['threshold'].to_f - threshold_reached = false - @nodes.each{|n| - if n.has_flag?("master") - next if !n.info[:w] - expected = ((ClusterHashSlots.to_f / total_weight) * - n.info[:w]).to_i - n.info[:balance] = n.slots.length - expected - # Compute the percentage of difference between the - # expected number of slots and the real one, to see - # if it's over the threshold specified by the user. - over_threshold = false - if threshold > 0 - if n.slots.length > 0 - err_perc = (100-(100.0*expected/n.slots.length)).abs - over_threshold = true if err_perc > threshold - elsif expected > 0 - over_threshold = true - end - end - threshold_reached = true if over_threshold - end - } - if !threshold_reached - xputs "*** No rebalancing needed! All nodes are within the #{threshold}% threshold." - return - end - - # Only consider nodes we want to change - sn = @nodes.select{|n| - n.has_flag?("master") && n.info[:w] - } - - # Because of rounding, it is possible that the balance of all nodes - # summed does not give 0. Make sure that nodes that have to provide - # slots are always matched by nodes receiving slots. - total_balance = sn.map{|x| x.info[:balance]}.reduce{|a,b| a+b} - while total_balance > 0 - sn.each{|n| - if n.info[:balance] < 0 && total_balance > 0 - n.info[:balance] -= 1 - total_balance -= 1 - end - } - end - - # Sort nodes by their slots balance. - sn = sn.sort{|a,b| - a.info[:balance] <=> b.info[:balance] - } - - xputs ">>> Rebalancing across #{nodes_involved} nodes. 
Total weight = #{total_weight}" - - if $verbose - sn.each{|n| - puts "#{n} balance is #{n.info[:balance]} slots" - } - end - - # Now we have at the start of the 'sn' array nodes that should get - # slots, at the end nodes that must give slots. - # We take two indexes, one at the start, and one at the end, - # incrementing or decrementing the indexes accordingly til we - # find nodes that need to get/provide slots. - dst_idx = 0 - src_idx = sn.length - 1 - - while dst_idx < src_idx - dst = sn[dst_idx] - src = sn[src_idx] - numslots = [dst.info[:balance],src.info[:balance]].map{|n| - n.abs - }.min - - if numslots > 0 - puts "Moving #{numslots} slots from #{src} to #{dst}" - - # Actaully move the slots. - reshard_table = compute_reshard_table([src],numslots) - if reshard_table.length != numslots - xputs "*** Assertio failed: Reshard table != number of slots" - exit 1 - end - if opt['simulate'] - print "#"*reshard_table.length - else - reshard_table.each{|e| - move_slot(e[:source],dst,e[:slot], - :quiet=>true, - :dots=>false, - :update=>true, - :pipeline=>opt['pipeline']) - print "#" - STDOUT.flush - } - end - puts - end - - # Update nodes balance. - dst.info[:balance] += numslots - src.info[:balance] -= numslots - dst_idx += 1 if dst.info[:balance] == 0 - src_idx -= 1 if src.info[:balance] == 0 - end - end - - def fix_cluster_cmd(argv,opt) - @fix = true - @timeout = opt['timeout'].to_i if opt['timeout'] - - load_cluster_info_from_node(argv[0], opt['password']) - check_cluster - end - - def reshard_cluster_cmd(argv,opt) - opt = {'pipeline' => MigrateDefaultPipeline}.merge(opt) - - load_cluster_info_from_node(argv[0], opt['password']) - check_cluster - if @errors.length != 0 - puts "*** Please fix your cluster problems before resharding" - exit 1 - end - - @timeout = opt['timeout'].to_i if opt['timeout'].to_i - - # Get number of slots - if opt['slots'] - numslots = opt['slots'].to_i - else - numslots = 0 - while numslots <= 0 or numslots > ClusterHashSlots - print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? " - numslots = STDIN.gets.to_i - end - end - - # Get the target instance - if opt['to'] - target = get_node_by_name(opt['to']) - if !target || target.has_flag?("slave") - xputs "*** The specified node is not known or not a master, please retry." - exit 1 - end - else - target = nil - while not target - print "What is the receiving node ID? " - target = get_node_by_name(STDIN.gets.chop) - if !target || target.has_flag?("slave") - xputs "*** The specified node is not known or not a master, please retry." - target = nil - end - end - end - - # Get the source instances - sources = [] - if opt['from'] - opt['from'].split(',').each{|node_id| - if node_id == "all" - sources = "all" - break - end - src = get_node_by_name(node_id) - if !src || src.has_flag?("slave") - xputs "*** The specified node is not known or is not a master, please retry." - exit 1 - end - sources << src - } - else - xputs "Please enter all the source node IDs." - xputs " Type 'all' to use all the nodes as source nodes for the hash slots." - xputs " Type 'done' once you entered all the source nodes IDs." - while true - print "Source node ##{sources.length+1}:" - line = STDIN.gets.chop - src = get_node_by_name(line) - if line == "done" - break - elsif line == "all" - sources = "all" - break - elsif !src || src.has_flag?("slave") - xputs "*** The specified node is not known or is not a master, please retry." 
- elsif src.info[:name] == target.info[:name] - xputs "*** It is not possible to use the target node as source node." - else - sources << src - end - end - end - - if sources.length == 0 - puts "*** No source nodes given, operation aborted" - exit 1 - end - - # Handle soures == all. - if sources == "all" - sources = [] - @nodes.each{|n| - next if n.info[:name] == target.info[:name] - next if n.has_flag?("slave") - sources << n - } - end - - # Check if the destination node is the same of any source nodes. - if sources.index(target) - xputs "*** Target node is also listed among the source nodes!" - exit 1 - end - - puts "\nReady to move #{numslots} slots." - puts " Source nodes:" - sources.each{|s| puts " "+s.info_string} - puts " Destination node:" - puts " #{target.info_string}" - reshard_table = compute_reshard_table(sources,numslots) - puts " Resharding plan:" - show_reshard_table(reshard_table) - if !opt['yes'] - print "Do you want to proceed with the proposed reshard plan (yes/no)? " - yesno = STDIN.gets.chop - exit(1) if (yesno != "yes") - end - reshard_table.each{|e| - move_slot(e[:source],target,e[:slot], - :dots=>true, - :pipeline=>opt['pipeline']) - } - end - - # This is an helper function for create_cluster_cmd that verifies if - # the number of nodes and the specified replicas have a valid configuration - # where there are at least three master nodes and enough replicas per node. - def check_create_parameters - masters = @nodes.length/(@replicas+1) - if masters < 3 - puts "*** ERROR: Invalid configuration for cluster creation." - puts "*** Redis Cluster requires at least 3 master nodes." - puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node." - puts "*** At least #{3*(@replicas+1)} nodes are required." - exit 1 - end - end - - def create_cluster_cmd(argv,opt) - opt = {'replicas' => 0}.merge(opt) - @replicas = opt['replicas'].to_i - - xputs ">>> Creating cluster" - argv[0..-1].each{|n| - node = ClusterNode.new(n, opt['password']) - node.connect(:abort => true) - node.assert_cluster - node.load_info - node.assert_empty - add_node(node) - } - check_create_parameters - xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..." - alloc_slots - show_nodes - yes_or_die "Can I set the above configuration?" - flush_nodes_config - xputs ">>> Nodes configuration updated" - xputs ">>> Assign a different config epoch to each node" - assign_config_epoch - xputs ">>> Sending CLUSTER MEET messages to join the cluster" - join_cluster - # Give one second for the join to start, in order to avoid that - # wait_cluster_join will find all the nodes agree about the config as - # they are still empty with unassigned slots. - sleep 1 - wait_cluster_join - flush_nodes_config # Useful for the replicas - # Reset the node information, so that when the - # final summary is listed in check_cluster about the newly created cluster - # all the nodes would get properly listed as slaves or masters - reset_nodes - load_cluster_info_from_node(argv[0], opt['password']) - check_cluster - end - - def addnode_cluster_cmd(argv,opt) - xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}" - - # Check the existing cluster - load_cluster_info_from_node(argv[1], opt['password']) - check_cluster - - # If --master-id was specified, try to resolve it now so that we - # abort before starting with the node configuration. 
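create_cluster_cmd above ties the earlier pieces together: validate the node count, allocate slots, flush the configuration, assign epochs, MEET the nodes and re-check. From the shell it is a single command; a sketch against six fresh nodes (the addresses are placeholders), with --replicas 1 satisfying the at-least-three-masters rule enforced by check_create_parameters:

# 3*(replicas+1) nodes are required, so six nodes for --replicas 1
ruby redis-trib.rb create --replicas 1 --password myredis \
    10.0.0.1:6379 10.0.0.2:6379 10.0.0.3:6379 \
    10.0.0.4:6379 10.0.0.5:6379 10.0.0.6:6379

# The checks run at the end of create can be repeated later against any node
ruby redis-trib.rb check --password myredis 10.0.0.1:6379
ruby redis-trib.rb info  --password myredis 10.0.0.1:6379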
- if opt['slave'] - if opt['master-id'] - master = get_node_by_name(opt['master-id']) - if !master - xputs "[ERR] No such master ID #{opt['master-id']}" - end - else - master = get_master_with_least_replicas - xputs "Automatically selected master #{master}" - end - end - - # Add the new node - new = ClusterNode.new(argv[0], opt['password']) - new.connect(:abort => true) - new.assert_cluster - new.load_info - new.assert_empty - first = @nodes.first.info - add_node(new) - - # Send CLUSTER MEET command to the new node - xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster." - new.r.cluster("meet",first[:host],first[:port]) - - # Additional configuration is needed if the node is added as - # a slave. - if opt['slave'] - wait_cluster_join - xputs ">>> Configure node as replica of #{master}." - new.r.cluster("replicate",master.info[:name]) - end - xputs "[OK] New node added correctly." - end - - def delnode_cluster_cmd(argv,opt) - id = argv[1].downcase - xputs ">>> Removing node #{id} from cluster #{argv[0]}" - - # Load cluster information - load_cluster_info_from_node(argv[0], opt['password']) - - # Check if the node exists and is not empty - node = get_node_by_name(id) - - if !node - xputs "[ERR] No such node ID #{id}" - exit 1 - end - - if node.slots.length != 0 - xputs "[ERR] Node #{node} is not empty! Reshard data away and try again." - exit 1 - end - - # Send CLUSTER FORGET to all the nodes but the node to remove - xputs ">>> Sending CLUSTER FORGET messages to the cluster..." - @nodes.each{|n| - next if n == node - if n.info[:replicate] && n.info[:replicate].downcase == id - # Reconfigure the slave to replicate with some other node - master = get_master_with_least_replicas - xputs ">>> #{n} as replica of #{master}" - n.r.cluster("replicate",master.info[:name]) - end - n.r.cluster("forget",argv[1]) - } - - # Finally shutdown the node - xputs ">>> SHUTDOWN the node." - node.r.shutdown - end - - def set_timeout_cluster_cmd(argv,opt) - timeout = argv[1].to_i - if timeout < 100 - puts "Setting a node timeout of less than 100 milliseconds is a bad idea." - exit 1 - end - - # Load cluster information - load_cluster_info_from_node(argv[0], opt['password']) - ok_count = 0 - err_count = 0 - - # Send CLUSTER FORGET to all the nodes but the node to remove - xputs ">>> Reconfiguring node timeout in every cluster node..." - @nodes.each{|n| - begin - n.r.config("set","cluster-node-timeout",timeout) - n.r.config("rewrite") - ok_count += 1 - xputs "*** New timeout set for #{n}" - rescue => e - puts "ERR setting node-timeot for #{n}: #{e}" - err_count += 1 - end - } - xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR." - end - - def call_cluster_cmd(argv,opt) - cmd = argv[1..-1] - cmd[0] = cmd[0].upcase - - # Load cluster information - load_cluster_info_from_node(argv[0], opt['password']) - xputs ">>> Calling #{cmd.join(" ")}" - @nodes.each{|n| - begin - res = n.r.send(*cmd) - puts "#{n}: #{res}" - rescue => e - puts "#{n}: #{e}" - end - } - end - - def import_cluster_cmd(argv,opt) - source_addr = opt['from'] - xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}" - use_copy = opt['copy'] - use_replace = opt['replace'] - - # Check the existing cluster. - load_cluster_info_from_node(argv[0]) - check_cluster - - # Connect to the source node. 
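addnode_cluster_cmd and delnode_cluster_cmd above are, at the protocol level, CLUSTER MEET plus CLUSTER REPLICATE, and CLUSTER FORGET plus a shutdown. The equivalent by hand; the addresses are placeholders and the new node is assumed to be empty and unauthenticated:

# Add 10.0.0.7:6379 as a replica of an existing master
NEW=10.0.0.7; EXISTING=10.0.0.1
MASTER_ID=$(redis-cli -h $EXISTING cluster myid)
redis-cli -h $NEW cluster meet $EXISTING 6379
sleep 1                                  # let the handshake propagate before replicating
redis-cli -h $NEW cluster replicate $MASTER_ID

# Remove an empty node: every remaining node must forget it, then it is shut down
GONE_ID=$(redis-cli -h $NEW cluster myid)
for h in 10.0.0.1 10.0.0.2 10.0.0.3; do
    redis-cli -h $h cluster forget $GONE_ID
done
redis-cli -h $NEW shutdown nosave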
- xputs ">>> Connecting to the source Redis instance" - src_host,src_port = source_addr.split(":") - source = Redis.new(:host =>src_host, :port =>src_port) - if source.info['cluster_enabled'].to_i == 1 - xputs "[ERR] The source node should not be a cluster node." - end - xputs "*** Importing #{source.dbsize} keys from DB 0" - - # Build a slot -> node map - slots = {} - @nodes.each{|n| - n.slots.each{|s,_| - slots[s] = n - } - } - - # Use SCAN to iterate over the keys, migrating to the - # right node as needed. - cursor = nil - while cursor != 0 - cursor,keys = source.scan(cursor, :count => 1000) - cursor = cursor.to_i - keys.each{|k| - # Migrate keys using the MIGRATE command. - slot = key_to_slot(k) - target = slots[slot] - print "Migrating #{k} to #{target}: " - STDOUT.flush - begin - cmd = ["migrate",target.info[:host],target.info[:port],k,0,@timeout] - cmd << :copy if use_copy - cmd << :replace if use_replace - source.client.call(cmd) - rescue => e - puts e - else - puts "OK" - end - } - end - end - - def help_cluster_cmd(argv,opt) - show_help - exit 0 - end - - # Parse the options for the specific command "cmd". - # Returns an hash populate with option => value pairs, and the index of - # the first non-option argument in ARGV. - def parse_options(cmd) - idx = 1 ; # Current index into ARGV - options={} - while idx < ARGV.length && ARGV[idx][0..1] == '--' - if ARGV[idx][0..1] == "--" - option = ARGV[idx][2..-1] - idx += 1 - - # --verbose is a global option - if option == "verbose" - $verbose = true - next - end - - if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil - puts "Unknown option '#{option}' for command '#{cmd}'" - exit 1 - end - if ALLOWED_OPTIONS[cmd][option] != false - value = ARGV[idx] - idx += 1 - else - value = true - end - - # If the option is set to [], it's a multiple arguments - # option. We just queue every new value into an array. - if ALLOWED_OPTIONS[cmd][option] == [] - options[option] = [] if !options[option] - options[option] << value - else - options[option] = value - end - else - # Remaining arguments are not options. - break - end - end - - # Enforce mandatory options - if ALLOWED_OPTIONS[cmd] - ALLOWED_OPTIONS[cmd].each {|option,val| - if !options[option] && val == :required - puts "Option '--#{option}' is required "+ \ - "for subcommand '#{cmd}'" - exit 1 - end - } - end - return options,idx - end -end - -################################################################################# -# Libraries -# -# We try to don't depend on external libs since this is a critical part -# of Redis Cluster. -################################################################################# - -# This is the CRC16 algorithm used by Redis Cluster to hash keys. -# Implementation according to CCITT standards. 
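import_cluster_cmd above walks a standalone instance with SCAN, computes each key's slot and MIGRATEs it to whichever cluster node owns that slot. A shell approximation that leans on redis-cli --scan for the cursor loop and on CLUSTER KEYSLOT for the hash; it assumes the three-node test cluster still holds the default even split (0-5460 on 8000, 5461-10922 on 8001, 10923-16383 on 8002; verify with CLUSTER NODES) and keys without spaces or newlines:

SRC_HOST=127.0.0.1; SRC_PORT=6379        # standalone, non-cluster source

redis-cli -h $SRC_HOST -p $SRC_PORT --scan | while read -r key; do
    slot=$(redis-cli -a myredis -p 8000 cluster keyslot "$key")
    if   [ "$slot" -le 5460 ];  then port=8000
    elif [ "$slot" -le 10922 ]; then port=8001
    else                             port=8002
    fi
    # COPY keeps the key on the source, REPLACE overwrites an existing key on the target
    redis-cli -h $SRC_HOST -p $SRC_PORT \
        migrate 127.0.0.1 $port "$key" 0 60000 COPY REPLACE AUTH myredis
done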
-# -# This is actually the XMODEM CRC 16 algorithm, using the -# following parameters: -# -# Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" -# Width : 16 bit -# Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) -# Initialization : 0000 -# Reflect Input byte : False -# Reflect Output CRC : False -# Xor constant to output CRC : 0000 -# Output for "123456789" : 31C3 - -module RedisClusterCRC16 - def RedisClusterCRC16.crc16(bytes) - crc = 0 - bytes.each_byte{|b| - crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff] - } - crc - end - -private - XMODEMCRC16Lookup = [ - 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, - 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, - 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, - 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, - 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, - 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, - 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, - 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, - 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, - 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, - 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, - 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, - 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, - 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, - 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, - 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, - 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, - 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, - 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, - 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, - 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, - 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, - 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, - 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, - 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, - 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, - 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, - 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, - 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, - 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, - 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, - 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 - ] -end - -# Turn a key name into the corrisponding Redis Cluster slot. -def key_to_slot(key) - # Only hash what is inside {...} if there is such a pattern in the key. - # Note that the specification requires the content that is between - # the first { and the first } after the first {. If we found {} without - # nothing in the middle, the whole key is hashed as usually. - s = key.index "{" - if s - e = key.index "}",s+1 - if e && e != s+1 - key = key[s+1..e-1] - end - end - RedisClusterCRC16.crc16(key) % 16384 -end - -################################################################################# -# Definition of commands -################################################################################# - -COMMANDS={ - "create" => ["create_cluster_cmd", -2, "host1:port1 ... 
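key_to_slot above hashes only the substring between the first '{' and the next '}' when such a non-empty hash tag exists, so related keys can be forced into the same slot; the checksum is CRC-16/XMODEM, whose documented check value for "123456789" is 0x31C3 (12739), which is already below 16384 and is therefore also the slot. The server confirms both facts:

# Hash tags: both keys hash only "user1000", so they map to the same slot
redis-cli -a myredis -p 8000 cluster keyslot '{user1000}.following'
redis-cli -a myredis -p 8000 cluster keyslot '{user1000}.followers'

# CRC-16/XMODEM("123456789") = 0x31C3 = 12739, and 12739 % 16384 = 12739
redis-cli -a myredis -p 8000 cluster keyslot 123456789   # -> 12739
printf '%d\n' 0x31C3                                     # -> 12739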
hostN:portN"], - "check" => ["check_cluster_cmd", -2, "host:port"], - "info" => ["info_cluster_cmd", -2, "host:port"], - "fix" => ["fix_cluster_cmd", -2, "host:port"], - "reshard" => ["reshard_cluster_cmd", -2, "host:port"], - "rebalance" => ["rebalance_cluster_cmd", -2, "host:port"], - "add-node" => ["addnode_cluster_cmd", -3, "new_host:new_port existing_host:existing_port"], - "del-node" => ["delnode_cluster_cmd", -3, "host:port node_id"], - "set-timeout" => ["set_timeout_cluster_cmd", -3, "host:port milliseconds"], - "call" => ["call_cluster_cmd", -3, "host:port command arg arg .. arg"], - "import" => ["import_cluster_cmd", 2, "host:port"], - "help" => ["help_cluster_cmd", 1, "(show this help)"] -} - -ALLOWED_OPTIONS={ - "info" => {"password" => true}, - "check" => {"password" => true}, - "create" => {"replicas" => true, "password" => true}, - "add-node" => {"slave" => false, "master-id" => true, "password" => true}, - "del-node" => {"password" => true}, - "set-timeout" => {"password" => true}, - "import" => {"from" => :required, "copy" => false, "replace" => false}, - "reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false, "timeout" => true, "pipeline" => true, "password" => true}, - "rebalance" => {"weight" => [], "auto-weights" => false, "use-empty-masters" => false, "timeout" => true, "simulate" => false, "pipeline" => true, "threshold" => true, "password" => true}, - "fix" => {"timeout" => MigrateDefaultTimeout, "password" => true}, -} - -def show_help - puts "Usage: redis-trib \n\n" - COMMANDS.each{|k,v| - puts " #{k.ljust(15)} #{v[2]}" - if ALLOWED_OPTIONS[k] - ALLOWED_OPTIONS[k].each{|optname,has_arg| - puts " --#{optname}" + (has_arg ? " " : "") - } - end - } - puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n" -end - -# Sanity check -if ARGV.length == 0 - show_help - exit 1 -end - -rt = RedisTrib.new -cmd_spec = COMMANDS[ARGV[0].downcase] -if !cmd_spec - puts "Unknown redis-trib subcommand '#{ARGV[0]}'" - exit 1 -end - -# Parse options -cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase) -rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1)) - -# Dispatch -rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options) diff --git a/tests/configure/master/redis.conf b/tests/configure/master/redis.conf deleted file mode 100644 index bd31bd5..0000000 --- a/tests/configure/master/redis.conf +++ /dev/null @@ -1,1053 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. 
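The COMMANDS and ALLOWED_OPTIONS tables above define the whole CLI surface of the deleted redis-trib.rb; note that call and import declare no password option, so they only work against clusters without requirepass. Two representative non-interactive invocations, with node addresses as placeholders and every flag taken from ALLOWED_OPTIONS:

ENTRY=127.0.0.1:8000
TARGET_ID=$(redis-cli -a myredis -p 8001 cluster myid)   # reshard --to expects a full node ID

# Move 1000 slots from all other masters to the target, without prompting
ruby redis-trib.rb reshard --password myredis --from all --to "$TARGET_ID" --slots 1000 --yes $ENTRY

# Rebalance by weight (an abbreviated node-ID prefix is accepted); --simulate is a dry run
ruby redis-trib.rb rebalance --password myredis --weight "$TARGET_ID=2" --use-empty-masters --simulate $ENTRY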
Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 0.0.0.0 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# bind 0.0.0.0 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -# protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6301 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. 
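For reference, the network directives above (bind, protected-mode, port 6301, tcp-backlog, tcp-keepalive) can be checked against a running instance. A minimal sketch, assuming a server started from this file and reachable on 127.0.0.1:6301:

# Confirm the server answers on the configured port
redis-cli -h 127.0.0.1 -p 6301 PING
# Read back the effective network-related settings
redis-cli -p 6301 CONFIG GET protected-mode
redis-cli -p 6301 CONFIG GET tcp-keepalive
redis-cli -p 6301 CONFIG GET tcp-backlog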
-# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -# daemonize yes - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -# pidfile /data/redis.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -# logfile /var/logs/redis-server.log - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. 
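The general settings above (log verbosity, the 16 logical databases) can be exercised at runtime as well; a small sketch against the same assumed instance on port 6301, with an illustrative key name:

# Raise verbosity while debugging, then restore the default
redis-cli -p 6301 CONFIG SET loglevel debug
redis-cli -p 6301 CONFIG SET loglevel notice
# -n selects one of the 16 databases (0-15)
redis-cli -p 6301 -n 15 SET scratch:key "value"
redis-cli -p 6301 -n 15 GET scratch:key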
-# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -# dir /opt/bitnami/redis/data - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. 
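The save points configured above can be confirmed, and a snapshot forced by hand, without restarting; a sketch assuming the same instance on port 6301:

# Show the configured save points (900/1, 300/10, 60/10000)
redis-cli -p 6301 CONFIG GET save
# Force a background dump and check when the last one completed
redis-cli -p 6301 BGSAVE
redis-cli -p 6301 LASTSAVE
# rdb_last_bgsave_status should read "ok" if the dump succeeded
redis-cli -p 6301 INFO persistence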
-# -# masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. 
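The replication directives described above can also be applied at runtime. A sketch, assuming a second, hypothetical instance on 127.0.0.1:6302 that should replicate from this one (Redis 4.x still uses the SLAVEOF terminology):

# Only needed if the master has requirepass set
redis-cli -p 6302 CONFIG SET masterauth myredis
redis-cli -p 6302 SLAVEOF 127.0.0.1 6301
# Check the link state and the read-only behaviour of the slave
redis-cli -p 6302 INFO replication
# Expected to be rejected while slave-read-only is yes
redis-cli -p 6302 SET should-fail 1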
-repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. 
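Most of the replication tuning knobs in this block are ordinary CONFIG parameters, so they can be inspected or adjusted without a restart; a sketch against the same assumed instance:

# Inspect the partial-resync backlog and its time-to-live
redis-cli -p 6301 CONFIG GET repl-backlog-size
redis-cli -p 6301 CONFIG GET repl-backlog-ttl
# A lower number makes this slave a better candidate for promotion
redis-cli -p 6301 CONFIG SET slave-priority 50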
-# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a masteer. -# -# The listed IP and address normally reported by a slave is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. -# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -rename-command FLUSHDB "" -rename-command FLUSHALL "" - -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). 
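The min-slaves example given above, and the effect of the rename-command lines, can both be tried directly; a sketch assuming requirepass is set to "myredis" as in the per-node test configs (drop -a if no password is configured):

# Refuse writes unless at least 3 slaves are connected with lag <= 10s
redis-cli -p 6301 -a myredis CONFIG SET min-slaves-to-write 3
redis-cli -p 6301 -a myredis CONFIG SET min-slaves-max-lag 10
# FLUSHALL/FLUSHDB were renamed to the empty string above,
# so this should now fail with an unknown command error
redis-cli -p 6301 -a myredis FLUSHALL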
-# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key according to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys-random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. -# -# maxmemory-samples 5 - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. 
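A short sketch of the maxmemory and eviction behaviour described above, using an LRU policy on the same assumed instance (the 100mb cap is illustrative):

# Cap memory and evict the least recently used keys once the cap is hit
redis-cli -p 6301 CONFIG SET maxmemory 100mb
redis-cli -p 6301 CONFIG SET maxmemory-policy allkeys-lru
# used_memory (INFO memory) and evicted_keys (INFO stats) show the effect
redis-cli -p 6301 INFO memory
redis-cli -p 6301 INFO stats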
For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. 
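AOF can be switched on without a restart, which also triggers an initial rewrite; a sketch assuming the everysec fsync policy configured above:

# Enable the append only file and confirm the fsync policy
redis-cli -p 6301 CONFIG SET appendonly yes
redis-cli -p 6301 CONFIG GET appendfsync
# Force a compaction of the AOF and check its status
redis-cli -p 6301 BGREWRITEAOF
redis-cli -p 6301 INFO persistence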
Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. 
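If aof-load-truncated were set to no and the server refused to start, the text above points at redis-check-aof for repair; a sketch of that step, assuming appendonly.aof sits in the configured working directory:

# Inspect the AOF first, then truncate it back to the last valid command
redis-check-aof appendonly.aof
redis-check-aof --fix appendonly.aof
# A read-only Lua script that overruns lua-time-limit can be stopped with
redis-cli -p 6301 SCRIPT KILL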
-# -# cluster-config-file nodes-6301.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a slave to actually have a exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). 
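The cluster directives above are easiest to sanity-check against the test cluster this repository used to bring up on ports 8000-8005; a sketch assuming those nodes and the myredis password:

# Overall health and each node's view of the slot map
redis-cli -p 8000 -a myredis CLUSTER INFO
redis-cli -p 8000 -a myredis CLUSTER NODES
# Hash-tagged keys map to the same slot, as in redis-trib's key_to_slot
redis-cli -p 8000 -a myredis CLUSTER KEYSLOT "{user1000}.following"
redis-cli -p 8000 -a myredis CLUSTER KEYSLOT "{user1000}.followers"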
-# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. 
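Both the slow log and the latency monitor described above can be enabled and read back interactively; a sketch with illustrative thresholds on the same assumed instance:

# Log anything slower than 10ms, then read and clear the entries
redis-cli -p 6301 CONFIG SET slowlog-log-slower-than 10000
redis-cli -p 6301 SLOWLOG GET 10
redis-cli -p 6301 SLOWLOG RESET
# Sample latency events of 100ms or more and read the report
redis-cli -p 6301 CONFIG SET latency-monitor-threshold 100
redis-cli -p 6301 LATENCY LATEST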
Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. 
-set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. 
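The small-object encodings described above are visible through OBJECT ENCODING; a short sketch with hypothetical keys, relying on the thresholds configured above:

# A small hash stays in the compact ziplist encoding
redis-cli -p 6301 HSET enc:demo field1 value1
redis-cli -p 6301 OBJECT ENCODING enc:demo
# A small set of radix-10 integers should report intset
redis-cli -p 6301 SADD enc:ints 1 2 3
redis-cli -p 6301 OBJECT ENCODING enc:ints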
-# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml deleted file mode 100644 index 076e1eb..0000000 --- a/tests/docker-compose.yml +++ /dev/null @@ -1,78 +0,0 @@ -version: '2' - -services: - r7000: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry - ports: - - '8000:7000' - - '8001:7001' - - '8002:7002' - - '8003:7003' - - '8004:7004' - - '8005:7005' - volumes: - - ./configure/7000/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - platform: linux/amd64 - - r7001: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry-1 - volumes: - - ./configure/7001/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - network_mode: "service:r7000" - platform: linux/amd64 - - r7002: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry-2 - volumes: - - ./configure/7002/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - network_mode: "service:r7000" - platform: linux/amd64 - - r7003: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry-3 - volumes: - - ./configure/7003/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - network_mode: "service:r7000" - platform: linux/amd64 - - r7004: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry-4 - volumes: - - ./configure/7004/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - network_mode: "service:r7000" - platform: linux/amd64 - - r7005: - image: 'carwestsam/redis-with-source' - container_name: redis-cluster-entry-5 - volumes: - - ./configure/7005/redis.conf:/usr/local/etc/redis/redis.conf - command: redis-server /usr/local/etc/redis/redis.conf - network_mode: "service:r7000" - platform: linux/amd64 - - bmfs2: - image: 'datasignals/bmfs2:v1.0.1' - container_name: bmfs2 - volumes: - - /Users/sabahgowhar/desktop/bmfs2:/tmp/bmfs2 - - /Users/sabahgowhar/desktop/config:/config - environment: - - THEIA_CLOUD_SESSION_USER=Gary - network_mode: "service:r7000" - platform: linux/amd64 - devices: - - 
"/dev/fuse:/dev/fuse" - cap_add: - - SYS_ADMIN - privileged: true diff --git a/tests/intermediate.sh b/tests/intermediate.sh deleted file mode 100644 index e69de29..0000000 diff --git a/tests/setup.sh b/tests/setup.sh deleted file mode 100755 index 21715aa..0000000 --- a/tests/setup.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -docker-compose up -d - -docker exec redis-cluster-entry /bin/sh -c "echo yes > in.txt && /data/redis-trib.rb create --password myredis --replicas 1 127.0.0.1:8000 127.0.0.1:8001 127.0.0.1:8002 127.0.0.1:8003 127.0.0.1:8004 127.0.0.1:8005 < in.txt" - -echo "Redis cluster started, try: \"redis-cli -c -p 8000 -a myredis\""