setup integration test
This commit is contained in:
8
node_modules/bottleneck/src/redis/blacklist_client.lua
generated
vendored
Normal file
8
node_modules/bottleneck/src/redis/blacklist_client.lua
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
-- Blacklist a client: if the client is currently tracked, reset its
-- last-seen score to 0 so it is treated as long-expired and cleaned up
-- by the next process_tick.
local banned_client = ARGV[num_static_argv + 1]

local present = redis.call('zscore', client_last_seen_key, banned_client)
if present then
  redis.call('zadd', client_last_seen_key, 0, banned_client)
end

return {}
|
6
node_modules/bottleneck/src/redis/check.lua
generated
vendored
Normal file
6
node_modules/bottleneck/src/redis/check.lua
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
-- Check whether a job of the given weight could run immediately:
-- it must fit within current capacity AND the scheduled nextRequest
-- time must have been reached.
local job_weight = tonumber(ARGV[num_static_argv + 1])

local state = process_tick(now, false)
local next_request = tonumber(redis.call('hget', settings_key, 'nextRequest'))

local fits = conditions_check(state['capacity'], job_weight)
return fits and (next_request - now) <= 0
|
3
node_modules/bottleneck/src/redis/conditions_check.lua
generated
vendored
Normal file
3
node_modules/bottleneck/src/redis/conditions_check.lua
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
-- Capacity predicate shared by the other scripts: a nil capacity means
-- "unlimited" and always passes; otherwise the job weight must fit.
local conditions_check = function (capacity, weight)
  if capacity == nil then
    return true
  end
  return weight <= capacity
end
|
1
node_modules/bottleneck/src/redis/current_reservoir.lua
generated
vendored
Normal file
1
node_modules/bottleneck/src/redis/current_reservoir.lua
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
-- Advance limiter state, then report the current reservoir value.
local state = process_tick(now, false)
return state['reservoir']
|
3
node_modules/bottleneck/src/redis/done.lua
generated
vendored
Normal file
3
node_modules/bottleneck/src/redis/done.lua
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
-- Advance limiter state, then report how many jobs have completed
-- (the 'done' counter maintained in the settings hash).
process_tick(now, false)

local done_count = redis.call('hget', settings_key, 'done')
return tonumber(done_count)
|
5
node_modules/bottleneck/src/redis/free.lua
generated
vendored
Normal file
5
node_modules/bottleneck/src/redis/free.lua
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
-- Free a job: give it score 0 in the expirations zset so process_tick
-- immediately treats it as expired, then return the updated running count.
local job_index = ARGV[num_static_argv + 1]

redis.call('zadd', job_expirations_key, 0, job_index)

local state = process_tick(now, false)
return state['running']
|
7
node_modules/bottleneck/src/redis/get_time.lua
generated
vendored
Normal file
7
node_modules/bottleneck/src/redis/get_time.lua
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
-- Switch to effects-based replication so the non-deterministic TIME
-- command is allowed inside this script.
redis.replicate_commands()

-- Returns server time in milliseconds: TIME yields {seconds, microseconds}
-- as strings; concatenating seconds with the first 3 microsecond digits
-- produces the millisecond timestamp.
local get_time = function ()
  local reply = redis.call('time')
  local seconds, microseconds = reply[1], reply[2]
  return tonumber(seconds..string.sub(microseconds, 1, 3))
end
|
1
node_modules/bottleneck/src/redis/group_check.lua
generated
vendored
Normal file
1
node_modules/bottleneck/src/redis/group_check.lua
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
-- True when this limiter's settings hash no longer exists
-- (i.e. the group member has expired and can be collected).
local settings_exist = redis.call('exists', settings_key)
return settings_exist ~= 1
|
1
node_modules/bottleneck/src/redis/heartbeat.lua
generated
vendored
Normal file
1
node_modules/bottleneck/src/redis/heartbeat.lua
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
-- Heartbeat: advance limiter state with always_publish = true so the
-- capacity message is broadcast unconditionally.
process_tick(now, true)
|
10
node_modules/bottleneck/src/redis/increment_reservoir.lua
generated
vendored
Normal file
10
node_modules/bottleneck/src/redis/increment_reservoir.lua
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
-- Atomically add an amount to the reservoir and return its new value.
local amount = tonumber(ARGV[num_static_argv + 1])

redis.call('hincrby', settings_key, 'reservoir', amount)

-- Advance state (publishing unconditionally) and capture the result.
local state = process_tick(now, true)

-- Re-arm the group TTL on all limiter keys.
local group_timeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, group_timeout)

return state['reservoir']
|
105
node_modules/bottleneck/src/redis/init.lua
generated
vendored
Normal file
105
node_modules/bottleneck/src/redis/init.lua
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
-- Initialize a limiter: optionally wipe all keys, create the settings
-- hash if absent, otherwise run in-place schema migrations up to the
-- client's limiter_version. Always refreshes the group TTL at the end.
local clear = tonumber(ARGV[num_static_argv + 1])
local limiter_version = ARGV[num_static_argv + 2]
-- Arguments after this offset are the caller-supplied settings
-- field/value pairs used when creating the hash below.
local num_local_argv = num_static_argv + 2

-- clear == 1 requests a full reset of every limiter key.
if clear == 1 then
  redis.call('del', unpack(KEYS))
end

if redis.call('exists', settings_key) == 0 then
  -- Create: write caller-supplied settings, then the runtime fields.
  local args = {'hmset', settings_key}

  for i = num_local_argv + 1, #ARGV do
    table.insert(args, ARGV[i])
  end

  redis.call(unpack(args))
  redis.call('hmset', settings_key,
    'nextRequest', now,
    'lastReservoirRefresh', now,
    'lastReservoirIncrease', now,
    'running', 0,
    'done', 0,
    'unblockTime', 0,
    'capacityPriorityCounter', 0
  )

else
  -- Apply migrations
  local settings = redis.call('hmget', settings_key,
    'id',
    'version'
  )
  local id = settings[1]
  local current_version = settings[2]

  if current_version ~= limiter_version then
    -- Split "x.y.z" into numeric components.
    -- NOTE(review): the pattern has one capture, so `v` is always nil
    -- and unused here; only `k` carries each component.
    local version_digits = {}
    for k, v in string.gmatch(current_version, "([^.]+)") do
      table.insert(version_digits, tonumber(k))
    end

    -- Each migration below is gated on the stored version's minor/patch
    -- digits and bumps the stored 'version' so it runs at most once.

    -- 2.10.0
    if version_digits[2] < 10 then
      redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')
      redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')
      redis.call('hsetnx', settings_key, 'done', 0)
      redis.call('hset', settings_key, 'version', '2.10.0')
    end

    -- 2.11.1: backfill lastReservoirRefresh when it was left empty.
    if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then
      if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then
        redis.call('hmset', settings_key,
          'lastReservoirRefresh', now,
          'version', '2.11.1'
        )
      end
    end

    -- 2.14.0: rename legacy per-limiter keys to the current key layout.
    if version_digits[2] < 14 then
      local old_running_key = 'b_'..id..'_running'
      local old_executing_key = 'b_'..id..'_executing'

      if redis.call('exists', old_running_key) == 1 then
        redis.call('rename', old_running_key, job_weights_key)
      end
      if redis.call('exists', old_executing_key) == 1 then
        redis.call('rename', old_executing_key, job_expirations_key)
      end
      redis.call('hset', settings_key, 'version', '2.14.0')
    end

    -- 2.15.2
    if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then
      redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)
      redis.call('hset', settings_key, 'version', '2.15.2')
    end

    -- 2.17.0
    if version_digits[2] < 17 then
      redis.call('hsetnx', settings_key, 'clientTimeout', 10000)
      redis.call('hset', settings_key, 'version', '2.17.0')
    end

    -- 2.18.0
    if version_digits[2] < 18 then
      redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')
      redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)
      redis.call('hset', settings_key, 'version', '2.18.0')
    end

  end

  -- Existing limiter: advance its state once after (possible) migration.
  process_tick(now, false)
end

-- Re-arm the group TTL on all limiter keys.
local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)

return {}
|
214
node_modules/bottleneck/src/redis/process_tick.lua
generated
vendored
Normal file
214
node_modules/bottleneck/src/redis/process_tick.lua
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
-- Core state-advance routine shared by all scripts.
-- Reclaims expired jobs, applies reservoir refresh/increase schedules,
-- evicts unresponsive clients, and publishes capacity-change messages.
-- Returns { capacity, running, reservoir } reflecting post-tick state.
-- When always_publish is true the capacity message is sent regardless
-- of whether capacity changed.
local process_tick = function (now, always_publish)

  -- Effective capacity: the binding constraint between maxConcurrent
  -- slack and the reservoir; nil means "unlimited".
  local compute_capacity = function (maxConcurrent, running, reservoir)
    if maxConcurrent ~= nil and reservoir ~= nil then
      return math.min((maxConcurrent - running), reservoir)
    elseif maxConcurrent ~= nil then
      return maxConcurrent - running
    elseif reservoir ~= nil then
      return reservoir
    else
      return nil
    end
  end

  -- Load all settings in one HMGET; unset fields tonumber() to nil.
  local settings = redis.call('hmget', settings_key,
    'id',
    'maxConcurrent',
    'running',
    'reservoir',
    'reservoirRefreshInterval',
    'reservoirRefreshAmount',
    'lastReservoirRefresh',
    'reservoirIncreaseInterval',
    'reservoirIncreaseAmount',
    'reservoirIncreaseMaximum',
    'lastReservoirIncrease',
    'capacityPriorityCounter',
    'clientTimeout'
  )
  local id = settings[1]
  local maxConcurrent = tonumber(settings[2])
  local running = tonumber(settings[3])
  local reservoir = tonumber(settings[4])
  local reservoirRefreshInterval = tonumber(settings[5])
  local reservoirRefreshAmount = tonumber(settings[6])
  local lastReservoirRefresh = tonumber(settings[7])
  local reservoirIncreaseInterval = tonumber(settings[8])
  local reservoirIncreaseAmount = tonumber(settings[9])
  local reservoirIncreaseMaximum = tonumber(settings[10])
  local lastReservoirIncrease = tonumber(settings[11])
  local capacityPriorityCounter = tonumber(settings[12])
  local clientTimeout = tonumber(settings[13])

  -- Snapshot capacity before mutations so increases can be detected.
  local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)

  --
  -- Process 'running' changes
  --
  -- Jobs whose expiration score is strictly before `now` are reclaimed.
  local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)

  if #expired > 0 then
    redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)

    -- Remove one batch of expired job indices from the weight/client
    -- hashes and accumulate the freed weight into acc.
    local flush_batch = function (batch, acc)
      local weights = redis.call('hmget', job_weights_key, unpack(batch))
      redis.call('hdel', job_weights_key, unpack(batch))
      local clients = redis.call('hmget', job_clients_key, unpack(batch))
      redis.call('hdel', job_clients_key, unpack(batch))

      -- Calculate sum of removed weights
      for i = 1, #weights do
        acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)
      end

      -- Calculate sum of removed weights by client
      -- NOTE(review): `client_weights` is never used; accumulation goes
      -- directly into acc['client_weights'] — candidate for removal.
      local client_weights = {}
      for i = 1, #clients do
        local removed = tonumber(weights[i]) or 0
        if removed > 0 then
          acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed
        end
      end
    end

    local acc = {
      ['total'] = 0,
      ['client_weights'] = {}
    }
    -- Batch hash operations to bound unpack()/argument-list sizes.
    local batch_size = 1000

    -- Compute changes to Zsets and apply changes to Hashes
    for i = 1, #expired, batch_size do
      local batch = {}
      for j = i, math.min(i + batch_size - 1, #expired) do
        table.insert(batch, expired[j])
      end

      flush_batch(batch, acc)
    end

    -- Apply changes to Zsets
    if acc['total'] > 0 then
      redis.call('hincrby', settings_key, 'done', acc['total'])
      running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))
    end

    for client, weight in pairs(acc['client_weights']) do
      redis.call('zincrby', client_running_key, -weight, client)
    end
  end

  --
  -- Process 'reservoir' changes
  --
  -- Refresh: reset the reservoir to a fixed amount on an interval.
  local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil
  if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then
    reservoir = reservoirRefreshAmount
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirRefresh', now
    )
  end

  -- Increase: grow the reservoir by amount-per-interval, optionally
  -- capped at reservoirIncreaseMaximum; catches up over elapsed intervals.
  local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil
  if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then
    local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)
    local incr = reservoirIncreaseAmount * num_intervals
    if reservoirIncreaseMaximum ~= nil then
      incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))
    end
    if incr > 0 then
      reservoir = (reservoir or 0) + incr
    end
    -- Advance the marker by whole intervals (not to `now`) so no
    -- fractional interval is lost.
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)
    )
  end

  --
  -- Clear unresponsive clients
  --
  -- Clients not seen within clientTimeout; only those with zero running
  -- weight are fully terminated (others may still own jobs).
  local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))
  local unresponsive_lookup = {}
  local terminated_clients = {}
  for i = 1, #unresponsive do
    unresponsive_lookup[unresponsive[i]] = true
    if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then
      table.insert(terminated_clients, unresponsive[i])
    end
  end
  if #terminated_clients > 0 then
    redis.call('zrem', client_running_key, unpack(terminated_clients))
    redis.call('hdel', client_num_queued_key, unpack(terminated_clients))
    redis.call('zrem', client_last_registered_key, unpack(terminated_clients))
    redis.call('zrem', client_last_seen_key, unpack(terminated_clients))
  end

  --
  -- Broadcast capacity changes
  --
  local final_capacity = compute_capacity(maxConcurrent, running, reservoir)

  if always_publish or (initial_capacity ~= nil and final_capacity == nil) then
    -- always_publish or was not unlimited, now unlimited
    redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))

  elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then
    -- capacity was increased
    -- send the capacity message to the limiter having the lowest number of running jobs
    -- the tiebreaker is the limiter having not registered a job in the longest time

    local lowest_concurrency_value = nil
    local lowest_concurrency_clients = {}
    local lowest_concurrency_last_registered = {}
    local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')

    -- WITHSCORES interleaves member,score pairs; step by 2.
    for i = 1, #client_concurrencies, 2 do
      local client = client_concurrencies[i]
      local concurrency = tonumber(client_concurrencies[i+1])

      -- zrange is score-ascending, so candidates all share the lowest
      -- concurrency; skip unresponsive clients and those with no queue.
      if (
        lowest_concurrency_value == nil or lowest_concurrency_value == concurrency
      ) and (
        not unresponsive_lookup[client]
      ) and (
        tonumber(redis.call('hget', client_num_queued_key, client)) > 0
      ) then
        lowest_concurrency_value = concurrency
        table.insert(lowest_concurrency_clients, client)
        local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))
        table.insert(lowest_concurrency_last_registered, last_registered)
      end
    end

    if #lowest_concurrency_clients > 0 then
      -- Tiebreak: pick the candidate with the earliest last-registered time.
      local position = 1
      local earliest = lowest_concurrency_last_registered[1]

      for i,v in ipairs(lowest_concurrency_last_registered) do
        if v < earliest then
          position = i
          earliest = v
        end
      end

      local next_client = lowest_concurrency_clients[position]
      redis.call('publish', 'b_'..id,
        'capacity-priority:'..(final_capacity or '')..
        ':'..next_client..
        ':'..capacityPriorityCounter
      )
      -- Counter lets subscribers de-duplicate priority messages.
      redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')
    else
      redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))
    end
  end

  return {
    ['capacity'] = final_capacity,
    ['running'] = running,
    ['reservoir'] = reservoir
  }
end
|
10
node_modules/bottleneck/src/redis/queued.lua
generated
vendored
Normal file
10
node_modules/bottleneck/src/redis/queued.lua
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
-- Sum the queued-job counts of all clients seen within clientTimeout
-- of `now`. Returns 0 when no clients are live.
--
-- Fixes two defects in the original:
--  * HMGET was invoked via unpack() on a possibly-empty client list;
--    HMGET with zero field arguments is a Redis error.
--  * tonumber() of a missing hash field (false) yields nil, crashing
--    the sum; use the same `or 0` guard process_tick applies to weights.
local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))
local valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')

local sum = 0
if #valid_clients > 0 then
  local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))
  for i = 1, #client_queued do
    sum = sum + (tonumber(client_queued[i]) or 0)
  end
end

return sum
|
11
node_modules/bottleneck/src/redis/refresh_expiration.lua
generated
vendored
Normal file
11
node_modules/bottleneck/src/redis/refresh_expiration.lua
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
-- Re-arm the TTL on every limiter key. Only applies when this limiter
-- belongs to a Group (groupTimeout set); nil means keys never expire.
local refresh_expiration = function (now, nextRequest, groupTimeout)
  if groupTimeout == nil then
    return
  end

  -- Keys live until groupTimeout past the next scheduled request.
  local ttl = (nextRequest + groupTimeout) - now
  for _, key in ipairs(KEYS) do
    redis.call('pexpire', key, ttl)
  end
end
|
13
node_modules/bottleneck/src/redis/refs.lua
generated
vendored
Normal file
13
node_modules/bottleneck/src/redis/refs.lua
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
-- Shared references prepended to every Bottleneck Redis script.
-- KEYS arrive in a fixed order from the Node.js client; these names are
-- the common vocabulary used by all other scripts in this directory.
local settings_key = KEYS[1]               -- hash of limiter settings
local job_weights_key = KEYS[2]            -- hash: job index -> weight
local job_expirations_key = KEYS[3]        -- zset: job index scored by expiration time
local job_clients_key = KEYS[4]            -- hash: job index -> owning client id
local client_running_key = KEYS[5]         -- zset: client -> total running weight
local client_num_queued_key = KEYS[6]      -- hash: client -> queued-job count
local client_last_registered_key = KEYS[7] -- zset: client -> last registration time
local client_last_seen_key = KEYS[8]       -- zset: client -> last-seen time

-- Static arguments common to every script invocation.
local now = tonumber(ARGV[1])              -- caller-supplied current time
local client = ARGV[2]                     -- calling client's identifier

-- Script-specific arguments start at ARGV[num_static_argv + 1].
local num_static_argv = 2
|
51
node_modules/bottleneck/src/redis/register.lua
generated
vendored
Normal file
51
node_modules/bottleneck/src/redis/register.lua
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
-- Try to start a job: if capacity allows, record the job's weight,
-- owner and (optional) expiration, charge the reservoir, and schedule
-- the next permitted request time.
-- Returns {true, wait, reservoir} on success, {false} when it can't run.
local index = ARGV[num_static_argv + 1]
local weight = tonumber(ARGV[num_static_argv + 2])
local expiration = tonumber(ARGV[num_static_argv + 3])

-- Advance state first so capacity reflects reclaimed/expired jobs.
local state = process_tick(now, false)
local capacity = state['capacity']
local reservoir = state['reservoir']

local settings = redis.call('hmget', settings_key,
  'nextRequest',
  'minTime',
  'groupTimeout'
)
local nextRequest = tonumber(settings[1])
local minTime = tonumber(settings[2])
local groupTimeout = tonumber(settings[3])

if conditions_check(capacity, weight) then

  -- Record the job against both the limiter and the calling client.
  redis.call('hincrby', settings_key, 'running', weight)
  redis.call('hset', job_weights_key, index, weight)
  -- expiration is nil when the caller passed a non-numeric value;
  -- only then is the job exempt from expiration-based reclamation.
  if expiration ~= nil then
    redis.call('zadd', job_expirations_key, now + expiration, index)
  end
  redis.call('hset', job_clients_key, index, client)
  redis.call('zincrby', client_running_key, weight, client)
  -- The job leaves the queue: decrement this client's queued count.
  redis.call('hincrby', client_num_queued_key, client, -1)
  redis.call('zadd', client_last_registered_key, now, client)

  -- wait = how long the caller must still hold before running;
  -- the next request may start minTime after that point.
  local wait = math.max(nextRequest - now, 0)
  local newNextRequest = now + wait + minTime

  if reservoir == nil then
    -- Unlimited reservoir: only the schedule is updated.
    redis.call('hset', settings_key,
      'nextRequest', newNextRequest
    )
  else
    -- Charge the job's weight against the reservoir.
    reservoir = reservoir - weight
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'nextRequest', newNextRequest
    )
  end

  refresh_expiration(now, newNextRequest, groupTimeout)

  return {true, wait, reservoir}

else
  return {false}
end
|
12
node_modules/bottleneck/src/redis/register_client.lua
generated
vendored
Normal file
12
node_modules/bottleneck/src/redis/register_client.lua
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
-- Register this client with the limiter. Initializes its bookkeeping
-- entries only when the client is not already tracked.
local initial_queued = tonumber(ARGV[num_static_argv + 1])

-- Could have been re-registered concurrently
local already_known = redis.call('zscore', client_last_seen_key, client)
if not already_known then
  redis.call('zadd', client_running_key, 0, client)
  redis.call('hset', client_num_queued_key, client, initial_queued)
  redis.call('zadd', client_last_registered_key, 0, client)
end

-- Always refresh the client's last-seen timestamp.
redis.call('zadd', client_last_seen_key, now, client)

return {}
|
1
node_modules/bottleneck/src/redis/running.lua
generated
vendored
Normal file
1
node_modules/bottleneck/src/redis/running.lua
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
-- Advance limiter state, then report the current running weight.
local state = process_tick(now, false)
return state['running']
|
74
node_modules/bottleneck/src/redis/submit.lua
generated
vendored
Normal file
74
node_modules/bottleneck/src/redis/submit.lua
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
-- Admission control for a job entering the queue. Decides whether the
-- high-water mark was reached and, under the BLOCK strategy (3),
-- whether the limiter should enter/remain in the blocked state.
-- Returns {reachedHWM, blocked, strategy}.
local queueLength = tonumber(ARGV[num_static_argv + 1])
local weight = tonumber(ARGV[num_static_argv + 2])

-- Advance state first so capacity reflects reclaimed/expired jobs.
local capacity = process_tick(now, false)['capacity']

local settings = redis.call('hmget', settings_key,
  'id',
  'maxConcurrent',
  'highWater',
  'nextRequest',
  'strategy',
  'unblockTime',
  'penalty',
  'minTime',
  'groupTimeout'
)
local id = settings[1]
local maxConcurrent = tonumber(settings[2])
local highWater = tonumber(settings[3])
local nextRequest = tonumber(settings[4])
local strategy = tonumber(settings[5])
local unblockTime = tonumber(settings[6])
local penalty = tonumber(settings[7])
local minTime = tonumber(settings[8])
local groupTimeout = tonumber(settings[9])

-- A job heavier than maxConcurrent could never run: reject outright.
if maxConcurrent ~= nil and weight > maxConcurrent then
  return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)
end

-- HWM is reached when the queue is exactly at highWater and the job
-- could not run immediately (insufficient capacity or not yet due).
local reachedHWM = (highWater ~= nil and queueLength == highWater
  and not (
    conditions_check(capacity, weight)
    and nextRequest - now <= 0
  )
)

-- Strategy 3 (BLOCK): block when HWM is hit or a block is still active.
local blocked = strategy == 3 and (reachedHWM or unblockTime >= now)

if blocked then
  -- Default penalty: 15 * minTime, or 5000 when minTime is 0.
  local computedPenalty = penalty
  if computedPenalty == nil then
    if minTime == 0 then
      computedPenalty = 5000
    else
      computedPenalty = 15 * minTime
    end
  end

  local newNextRequest = now + computedPenalty + minTime

  redis.call('hmset', settings_key,
    'unblockTime', now + computedPenalty,
    'nextRequest', newNextRequest
  )

  -- Blocking drops all queued jobs: zero every client's queued count.
  local clients_queued_reset = redis.call('hkeys', client_num_queued_key)
  local queued_reset = {}
  for i = 1, #clients_queued_reset do
    table.insert(queued_reset, clients_queued_reset[i])
    table.insert(queued_reset, 0)
  end
  redis.call('hmset', client_num_queued_key, unpack(queued_reset))

  -- Notify all subscribed limiters that this limiter is now blocked.
  redis.call('publish', 'b_'..id, 'blocked:')

  refresh_expiration(now, newNextRequest, groupTimeout)
end

-- Job is accepted into the queue only when neither blocked nor at HWM.
if not blocked and not reachedHWM then
  redis.call('hincrby', client_num_queued_key, client, 1)
end

return {reachedHWM, blocked, strategy}
|
14
node_modules/bottleneck/src/redis/update_settings.lua
generated
vendored
Normal file
14
node_modules/bottleneck/src/redis/update_settings.lua
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
-- Overwrite limiter settings with the field/value pairs supplied after
-- the static arguments, then advance state and re-arm the group TTL.
local hmset_args = {'hmset', settings_key}
for i = num_static_argv + 1, #ARGV do
  hmset_args[#hmset_args + 1] = ARGV[i]
end
redis.call(unpack(hmset_args))

-- Recompute state and broadcast capacity unconditionally.
process_tick(now, true)

local group_timeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, group_timeout)

return {}
|
5
node_modules/bottleneck/src/redis/validate_client.lua
generated
vendored
Normal file
5
node_modules/bottleneck/src/redis/validate_client.lua
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
-- Reject calls from clients that were expired or never registered;
-- otherwise refresh the caller's last-seen timestamp.
local known = redis.call('zscore', client_last_seen_key, client)
if not known then
  return redis.error_reply('UNKNOWN_CLIENT')
end

redis.call('zadd', client_last_seen_key, now, client)
|
3
node_modules/bottleneck/src/redis/validate_keys.lua
generated
vendored
Normal file
3
node_modules/bottleneck/src/redis/validate_keys.lua
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
-- Fail fast when the limiter's settings hash is missing (e.g. the
-- group member expired or init was never run).
if redis.call('exists', settings_key) ~= 1 then
  return redis.error_reply('SETTINGS_KEY_NOT_FOUND')
end
|
Reference in New Issue
Block a user