Compare commits

releases/v ... releases/v (25 commits)

78c309ef59
0e414c630a
9a2319ecaf
7363c39621
742de9953a
e234343e3e
c99850e5d1
845942e04a
1a522d88d8
56b4c8a8ef
723bed634e
2e7c800734
6247f9299a
5d578441a6
42a316b84f
6ecc92f5ad
e7b71cc1a7
f26a08096a
ba42ad9139
07c8c20669
00c56dd770
50c843ac1c
ef036888ec
2758344bdf
18d8be76a0

.github/FUNDING.yml (vendored, new file, +1)
@@ -0,0 +1 @@
+ko_fi: softprops

.gitignore (vendored, 1 change)
@@ -1,3 +1,4 @@
__tests__/runner/*
# actions requires a node_modules dir https://github.com/actions/toolkit/blob/master/docs/javascript-action.md#publish-a-releasesv1-action
# but its recommended not to check these in https://github.com/actions/toolkit/blob/master/docs/action-versioning.md#recommendations
#node_modules

CHANGELOG.md (11 changes)
@@ -1,3 +1,12 @@
+## 0.1.4
+
+* Steps can now access the url of releases with the `url` output of this Action [#28](https://github.com/softprops/action-gh-release/pull/28)
+* Added basic GitHub API retry support to manage API turbulence [#26](https://github.com/softprops/action-gh-release/pull/26)
+
+## 0.1.3
+
+* Fixed where `with: body_path` was not being used in generated GitHub releases
+
## 0.1.2

* Add support for merging draft releases [#16](https://github.com/softprops/action-gh-release/pull/16)
@@ -22,7 +31,7 @@ GitHub actions inputs don't inherently support lists of things and one might like
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```

-* Add support for prerelease annotated GitHub releases with the new input field `with.prerelease: true`
+* Add support for prerelease annotated GitHub releases with the new input field `with.prerelease: true` [#19](https://github.com/softprops/action-gh-release/pull/19)

---

README.md (47 changes)
@@ -1,9 +1,28 @@
-# action gh-release [![Main workflow](https://github.com/softprops/action-gh-release/workflows/Main%20workflow/badge.svg)](https://github.com/softprops/action-gh-release/actions)
+<div align="center">
+  📦 :octocat:
+</div>
+<h1 align="center">
+  action gh-release
+</h1>

-> A GitHub Action for creating GitHub Releases on Linux, Windows, and OSX virtual environments
+<p align="center">
+  A GitHub Action for creating GitHub Releases on Linux, Windows, and OSX virtual environments
+</p>

+<div align="center">
+  <img src="demo.png"/>
+</div>

+<div align="center">
+  <a href="https://github.com/softprops/action-gh-release/actions">
+    <img src="https://github.com/softprops/action-gh-release/workflows/Main/badge.svg"/>
+  </a>
+</div>

+<br />

-![](demo.png)

> **⚠️ Note:** To use this action, you must have access to the [GitHub Actions](https://github.com/features/actions) feature. GitHub Actions are currently only available in public beta. You can [apply for the GitHub Actions beta here](https://github.com/features/actions/signup/).
@@ -28,7 +47,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
@@ -51,14 +70,13 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
      - name: Release
        uses: softprops/action-gh-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```

### ⬆️ Uploading release assets

You can configure a number of options for your
@@ -80,7 +98,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
      - name: Build
        run: echo ${{ github.sha }} > Release.txt
      - name: Test
@@ -106,7 +124,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
      - name: Build
        run: echo ${{ github.sha }} > Release.txt
      - name: Test
@@ -140,7 +158,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
      - name: Generate Changelog
        run: echo "# Good things have arrived" > ${{ github.workflow }}-CHANGELOG.txt
      - name: Release
@@ -169,6 +187,17 @@ The following are optional as `step.with` keys

💡 When providing a `body` and a `body_path` at the same time, `body` wins in this release: `releaseBody` returns `input_body` first and only falls back to reading `body_path` when no `body` is given (see `lib/util.js` and the `releaseBody` tests in this diff).

#### outputs

The following outputs can be accessed via `${{ steps.<step-id>.outputs }}` from this action

| Name  | Type   | Description                    |
|-------|--------|--------------------------------|
| `url` | String | Github.com URL for the release |

#### environment variables

The following are *required* as `step.env` keys

__tests__/release.txt (new file, +1)
@@ -0,0 +1 @@
+bar

@@ -1,4 +1,10 @@
-import { isTag, paths, parseConfig, parseInputFiles } from "../src/util";
+import {
+  releaseBody,
+  isTag,
+  paths,
+  parseConfig,
+  parseInputFiles
+} from "../src/util";
import * as assert from "assert";

describe("util", () => {
@@ -16,6 +22,56 @@ describe("util", () => {
      );
    });
  });
+  describe("releaseBody", () => {
+    it("uses input body", () => {
+      assert.equal(
+        "foo",
+        releaseBody({
+          github_ref: "",
+          github_repository: "",
+          github_token: "",
+          input_body: "foo",
+          input_body_path: undefined,
+          input_draft: false,
+          input_prerelease: false,
+          input_files: [],
+          input_name: undefined
+        })
+      );
+    });
+    it("uses input body path", () => {
+      assert.equal(
+        "bar",
+        releaseBody({
+          github_ref: "",
+          github_repository: "",
+          github_token: "",
+          input_body: undefined,
+          input_body_path: "__tests__/release.txt",
+          input_draft: false,
+          input_prerelease: false,
+          input_files: [],
+          input_name: undefined
+        })
+      );
+    });
+    it("defaults to body when both body and body path are provided", () => {
+      assert.equal(
+        "foo",
+        releaseBody({
+          github_ref: "",
+          github_repository: "",
+          github_token: "",
+          input_body: "foo",
+          input_body_path: "__tests__/release.txt",
+          input_draft: false,
+          input_prerelease: false,
+          input_files: [],
+          input_name: undefined
+        })
+      );
+    });
+  });
  describe("parseConfig", () => {
    it("parses basic config", () => {
      assert.deepStrictEqual(parseConfig({}), {

@@ -6,7 +6,7 @@ inputs:
  body:
    description: 'Note-worthy description of changes in release'
    required: false
-  body-path:
+  body_path:
    description: 'Path to load note-worthy description of changes in release from'
    required: false
  name:
@@ -23,6 +23,9 @@ inputs:
    required: false
env:
  'GITHUB_TOKEN': 'As provided by Github Actions'
+outputs:
+  url:
+    description: 'URL to the Release HTML Page'
runs:
  using: 'node12'
  main: 'lib/main.js'

@@ -16,6 +16,7 @@ var __asyncValues = (this && this.__asyncValues) || function (o) {
    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
Object.defineProperty(exports, "__esModule", { value: true });
+const util_1 = require("./util");
const fs_1 = require("fs");
const mime_1 = require("mime");
const path_1 = require("path");
@@ -30,7 +31,8 @@ class GitHubReleaser {
        return this.github.repos.createRelease(params);
    }
    allReleases(params) {
-        return this.github.paginate.iterator(this.github.repos.listReleases.endpoint.merge(params));
+        const updatedParams = Object.assign({ per_page: 100 }, params);
+        return this.github.paginate.iterator(this.github.repos.listReleases.endpoint.merge(updatedParams));
    }
}
exports.GitHubReleaser = GitHubReleaser;
@@ -98,7 +100,7 @@ exports.release = (config, releaser) => __awaiter(void 0, void 0, void 0, function* () {
    try {
        const tag_name = tag;
        const name = config.input_name || tag;
-        const body = config.input_body;
+        const body = util_1.releaseBody(config);
        const draft = config.input_draft;
        const prerelease = config.input_prerelease;
        console.log(`👩‍🏭 Creating new GitHub release for tag ${tag_name}...`);

lib/main.js (17 changes)
@@ -21,7 +21,21 @@ function run() {
        if (!util_1.isTag(config.github_ref)) {
            throw new Error(`⚠️ GitHub Releases requires a tag`);
        }
-        const gh = new github_2.GitHub(config.github_token);
+        github_2.GitHub.plugin(require("@octokit/plugin-throttling"));
+        const gh = new github_2.GitHub(config.github_token, {
+            onRateLimit: (retryAfter, options) => {
+                console.warn(`Request quota exhausted for request ${options.method} ${options.url}`);
+                if (options.request.retryCount === 0) {
+                    // only retries once
+                    console.log(`Retrying after ${retryAfter} seconds!`);
+                    return true;
+                }
+            },
+            onAbuseLimit: (retryAfter, options) => {
+                // does not retry, only logs a warning
+                console.warn(`Abuse detected for request ${options.method} ${options.url}`);
+            }
+        });
        let rel = yield github_1.release(config, new github_1.GitHubReleaser(gh));
        if (config.input_files) {
            util_1.paths(config.input_files).forEach((path) => __awaiter(this, void 0, void 0, function* () {
@@ -29,6 +43,7 @@ function run() {
            }));
        }
        console.log(`🎉 Release ready at ${rel.html_url}`);
+        core_1.setOutput("url", rel.html_url);
    }
    catch (error) {
        core_1.setFailed(error.message);
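
The handlers above are passed straight to the `GitHub` constructor. For comparison, the plugin's own entry point vendored later in this diff (`node_modules/@octokit/plugin-throttling/lib/index.js`) reads both handlers from an `options.throttle` object on the constructor returned by `.plugin(...)`, and throws when either handler is missing. A minimal sketch of that expected shape, following the vendored plugin README below (the token wiring here is illustrative, not the action's code):

```js
// Sketch only: mirrors the vendored plugin README, not lib/main.js above.
// lib/index.js destructures octokitOptions.throttle and requires both handlers.
const Octokit = require("@octokit/rest")
  .plugin(require("@octokit/plugin-throttling"));

const octokit = new Octokit({
  auth: `token ${process.env.GITHUB_TOKEN}`, // illustrative token source
  throttle: {
    onRateLimit: (retryAfter, options) => options.request.retryCount === 0, // retry once
    onAbuseLimit: (retryAfter, options) => false // never retry abuse limits
  }
});
```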

@@ -9,6 +9,11 @@ var __importStar = (this && this.__importStar) || function (mod) {
Object.defineProperty(exports, "__esModule", { value: true });
const glob = __importStar(require("glob"));
const fs_1 = require("fs");
+exports.releaseBody = (config) => {
+    return (config.input_body ||
+        (config.input_body_path &&
+            fs_1.readFileSync(config.input_body_path).toString("utf8")));
+};
exports.parseInputFiles = (files) => {
    return files.split(/\r?\n/).reduce((acc, line) => acc
        .concat(line.split(","))
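
Given the `||` short-circuit in `releaseBody` above, `input_body` wins whenever both inputs are set, which is exactly what the "defaults to body when both body and body path are provided" test earlier in this diff asserts. A small illustrative check (the require path and values are hypothetical):

```js
// Hypothetical usage of the compiled helper above.
const { releaseBody } = require("./lib/util");

const body = releaseBody({
  input_body: "foo",
  input_body_path: "__tests__/release.txt" // the fixture added above, containing "bar"
});

console.log(body); // "foo" -- input_body short-circuits before the file is read
```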

node_modules/@octokit/plugin-throttling/.travis.yml (generated, vendored, new file, +42)
@@ -0,0 +1,42 @@
language: node_js
cache: npm

# Trigger a push build on master and greenkeeper branches + PRs build on every branches
# Avoid double build on PRs (See https://github.com/travis-ci/travis-ci/issues/1147)
branches:
  only:
    - master
    - /^greenkeeper.*$/

jobs:
  include:
    - stage: test
      node_js: 8
    - node_js: 10
      env: Node 10 & coverage upload
      after_script:
        - npm run coverage:upload
    - node_js: lts/*
      env: memory-test
      script: npm run test:memory
    - stage: release
      env: semantic-release
      node_js: lts/*
      script: npx semantic-release

    # when Greenkeeper updates @octokit/routes, run "generate-routes" script
    # and push new routes.json file to the pull request
    - stage: greenkeeper-routes-update
      node_js: lts/*
      script:
        - git checkout $TRAVIS_BRANCH
        - node scripts/generate-routes
        # commit changes and push back to branch on GitHub. If there are no changes then exit without error
        - 'git commit -a -m "build: routes" --author="Octokit Bot <octokitbot@martynus.net>" && git push "https://${GH_TOKEN}@github.com/$TRAVIS_REPO_SLUG" ${TRAVIS_BRANCH} || true'

stages:
  - test
  - name: release
    if: branch = master AND type IN (push)
  - name: greenkeeper-routes-update
    if: branch =~ ^greenkeeper/@octokit/routes

node_modules/@octokit/plugin-throttling/CODE_OF_CONDUCT.md (generated, vendored, new file, +46)
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at opensource+octokit@github.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

node_modules/@octokit/plugin-throttling/LICENSE (generated, vendored, new file, +21)
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2018 Octokit contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

node_modules/@octokit/plugin-throttling/README.md (generated, vendored, new file, +107)
@@ -0,0 +1,107 @@
# plugin-throttling.js

> Octokit plugin for GitHub’s recommended request throttling

[](https://www.npmjs.com/package/@octokit/plugin-throttling)
[](https://travis-ci.com/octokit/plugin-throttling.js)
[](https://coveralls.io/github/octokit/plugin-throttling.js)
[](https://greenkeeper.io/)

Implements all [recommended best practises](https://developer.github.com/v3/guides/best-practices-for-integrators/) to prevent hitting abuse rate limits.

## Usage

The code below creates a "Hello, world!" issue on every repository in a given organization. Without the throttling plugin it would send many requests in parallel and would hit rate limits very quickly. But the `@octokit/plugin-throttling` slows down your requests according to the official guidelines, so you don't get blocked before your quota is exhausted.

The `throttle.onAbuseLimit` and `throttle.onRateLimit` options are required. Return `true` to automatically retry the request after `retryAfter` seconds.

```js
const Octokit = require('@octokit/rest')
  .plugin(require('@octokit/plugin-throttling'))

const octokit = new Octokit({
  auth: `token ${process.env.TOKEN}`,
  throttle: {
    onRateLimit: (retryAfter, options) => {
      console.warn(`Request quota exhausted for request ${options.method} ${options.url}`)

      if (options.request.retryCount === 0) { // only retries once
        console.log(`Retrying after ${retryAfter} seconds!`)
        return true
      }
    },
    onAbuseLimit: (retryAfter, options) => {
      // does not retry, only logs a warning
      console.warn(`Abuse detected for request ${options.method} ${options.url}`)
    }
  }
})

async function createIssueOnAllRepos (org) {
  const repos = await octokit.paginate(octokit.repos.listForOrg.endpoint({ org }))
  return Promise.all(repos.map(({ name }) =>
    octokit.issues.create({
      owner: org,
      repo: name,
      title: 'Hello, world!'
    })
  ))
}
```

Pass `{ throttle: { enabled: false } }` to disable this plugin.

### Clustering

Enabling Clustering support ensures that your application will not go over rate limits **across Octokit instances and across Nodejs processes**.

First install either `redis` or `ioredis`:
```
# NodeRedis (https://github.com/NodeRedis/node_redis)
npm install --save redis

# or ioredis (https://github.com/luin/ioredis)
npm install --save ioredis
```

Then in your application:
```js
const Bottleneck = require('bottleneck')
const Redis = require('redis')

const client = Redis.createClient({ /* options */ })
const connection = new Bottleneck.RedisConnection({ client })
connection.on('error', err => console.error(err))

const octokit = new Octokit({
  throttle: {
    onAbuseLimit: (retryAfter, options) => { /* ... */ },
    onRateLimit: (retryAfter, options) => { /* ... */ },

    // The Bottleneck connection object
    connection,

    // A "throttling ID". All octokit instances with the same ID
    // using the same Redis server will share the throttling.
    id: 'my-super-app',

    // Otherwise the plugin uses a lighter version of Bottleneck without Redis support
    Bottleneck
  }
})

// To close the connection and allow your application to exit cleanly:
await connection.disconnect()
```

To use the `ioredis` library instead:
```js
const Redis = require('ioredis')
const client = new Redis({ /* options */ })
const connection = new Bottleneck.IORedisConnection({ client })
connection.on('error', err => console.error(err))
```

## LICENSE

[MIT](LICENSE)

node_modules/@octokit/plugin-throttling/lib/index.js (generated, vendored, new file, +128)
@@ -0,0 +1,128 @@
module.exports = throttlingPlugin

const BottleneckLight = require('bottleneck/light')
const wrapRequest = require('./wrap-request')
const triggersNotificationPaths = require('./triggers-notification-paths')
const routeMatcher = require('./route-matcher')(triggersNotificationPaths)

// Workaround to allow tests to directly access the triggersNotification function.
const triggersNotification = throttlingPlugin.triggersNotification =
  routeMatcher.test.bind(routeMatcher)

const groups = {}

const createGroups = function (Bottleneck, common) {
  groups.global = new Bottleneck.Group({
    id: 'octokit-global',
    maxConcurrent: 10,
    ...common
  })
  groups.search = new Bottleneck.Group({
    id: 'octokit-search',
    maxConcurrent: 1,
    minTime: 2000,
    ...common
  })
  groups.write = new Bottleneck.Group({
    id: 'octokit-write',
    maxConcurrent: 1,
    minTime: 1000,
    ...common
  })
  groups.notifications = new Bottleneck.Group({
    id: 'octokit-notifications',
    maxConcurrent: 1,
    minTime: 3000,
    ...common
  })
}

function throttlingPlugin (octokit, octokitOptions = {}) {
  const {
    enabled = true,
    Bottleneck = BottleneckLight,
    id = 'no-id',
    timeout = 1000 * 60 * 2, // Redis TTL: 2 minutes
    connection
  } = octokitOptions.throttle || {}
  if (!enabled) {
    return
  }
  const common = { connection, timeout }

  if (groups.global == null) {
    createGroups(Bottleneck, common)
  }

  const state = Object.assign({
    clustering: connection != null,
    triggersNotification,
    minimumAbuseRetryAfter: 5,
    retryAfterBaseValue: 1000,
    retryLimiter: new Bottleneck(),
    id,
    ...groups
  }, octokitOptions.throttle)

  if (typeof state.onAbuseLimit !== 'function' || typeof state.onRateLimit !== 'function') {
    throw new Error(`octokit/plugin-throttling error:
      You must pass the onAbuseLimit and onRateLimit error handlers.
      See https://github.com/octokit/rest.js#throttling

      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (error, options) => {/* ... */},
          onRateLimit: (error, options) => {/* ... */}
        }
      })
    `)
  }

  const events = {}
  const emitter = new Bottleneck.Events(events)
  events.on('abuse-limit', state.onAbuseLimit)
  events.on('rate-limit', state.onRateLimit)
  events.on('error', e => console.warn('Error in throttling-plugin limit handler', e))

  state.retryLimiter.on('failed', async function (error, info) {
    const options = info.args[info.args.length - 1]
    const isGraphQL = options.url.startsWith('/graphql')

    if (!(isGraphQL || error.status === 403)) {
      return
    }

    const retryCount = ~~options.request.retryCount
    options.request.retryCount = retryCount

    const { wantRetry, retryAfter } = await (async function () {
      if (/\babuse\b/i.test(error.message)) {
        // The user has hit the abuse rate limit. (REST only)
        // https://developer.github.com/v3/#abuse-rate-limits

        // The Retry-After header can sometimes be blank when hitting an abuse limit,
        // but is always present after 2-3s, so make sure to set `retryAfter` to at least 5s by default.
        const retryAfter = Math.max(~~error.headers['retry-after'], state.minimumAbuseRetryAfter)
        const wantRetry = await emitter.trigger('abuse-limit', retryAfter, options)
        return { wantRetry, retryAfter }
      }
      if (error.headers != null && error.headers['x-ratelimit-remaining'] === '0') {
        // The user has used all their allowed calls for the current time period (REST and GraphQL)
        // https://developer.github.com/v3/#rate-limiting

        const rateLimitReset = new Date(~~error.headers['x-ratelimit-reset'] * 1000).getTime()
        const retryAfter = Math.max(Math.ceil((rateLimitReset - Date.now()) / 1000), 0)
        const wantRetry = await emitter.trigger('rate-limit', retryAfter, options)
        return { wantRetry, retryAfter }
      }
      return {}
    })()

    if (wantRetry) {
      options.request.retryCount++
      return retryAfter * state.retryAfterBaseValue
    }
  })

  octokit.hook.wrap('request', wrapRequest.bind(null, state))
}

node_modules/@octokit/plugin-throttling/lib/route-matcher.js (generated, vendored, new file, +31)
@@ -0,0 +1,31 @@
module.exports = routeMatcher

function routeMatcher (paths) {
  // EXAMPLE. For the following paths:
  /* [
       "/orgs/:org/invitations",
       "/repos/:owner/:repo/collaborators/:username"
  ] */

  const regexes = paths.map(p =>
    p.split('/')
      .map(c => c.startsWith(':') ? '(?:.+?)' : c)
      .join('/')
  )
  // 'regexes' would contain:
  /* [
       '/orgs/(?:.+?)/invitations',
       '/repos/(?:.+?)/(?:.+?)/collaborators/(?:.+?)'
  ] */

  const regex = `^(?:${regexes.map(r => `(?:${r})`).join('|')})[^/]*$`
  // 'regex' would contain:
  /*
     ^(?:(?:\/orgs\/(?:.+?)\/invitations)|(?:\/repos\/(?:.+?)\/(?:.+?)\/collaborators\/(?:.+?)))[^\/]*$

     It may look scary, but paste it into https://www.debuggex.com/
     and it will make a lot more sense!
  */

  return new RegExp(regex, 'i')
}
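
To make the commented example above concrete, here is a small usage sketch of the matcher (the relative require path is assumed; it is not part of the vendored file):

```js
// Assumed to run next to route-matcher.js.
const routeMatcher = require('./route-matcher')

const matcher = routeMatcher([
  '/orgs/:org/invitations',
  '/repos/:owner/:repo/collaborators/:username'
])

console.log(matcher.test('/orgs/octokit/invitations'))      // true
console.log(matcher.test('/repos/a/b/collaborators/carol')) // true
console.log(matcher.test('/repos/a/b/issues'))              // false: path not in the list
```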

node_modules/@octokit/plugin-throttling/lib/triggers-notification-paths.json (generated, vendored, new file, +15)
@@ -0,0 +1,15 @@
[
  "/orgs/:org/invitations",
  "/repos/:owner/:repo/collaborators/:username",
  "/repos/:owner/:repo/commits/:sha/comments",
  "/repos/:owner/:repo/issues",
  "/repos/:owner/:repo/issues/:issue_number/comments",
  "/repos/:owner/:repo/pulls",
  "/repos/:owner/:repo/pulls/:pull_number/comments",
  "/repos/:owner/:repo/pulls/:pull_number/merge",
  "/repos/:owner/:repo/pulls/:pull_number/requested_reviewers",
  "/repos/:owner/:repo/pulls/:pull_number/reviews",
  "/repos/:owner/:repo/releases",
  "/teams/:team_id/discussions",
  "/teams/:team_id/discussions/:discussion_number/comments"
]

node_modules/@octokit/plugin-throttling/lib/wrap-request.js (generated, vendored, new file, +49)
@@ -0,0 +1,49 @@
module.exports = wrapRequest

const noop = () => Promise.resolve()

function wrapRequest (state, request, options) {
  return state.retryLimiter.schedule(doRequest, state, request, options)
}

async function doRequest (state, request, options) {
  const isWrite = options.method !== 'GET' && options.method !== 'HEAD'
  const isSearch = options.method === 'GET' && options.url.startsWith('/search/')
  const isGraphQL = options.url.startsWith('/graphql')

  const retryCount = ~~options.request.retryCount
  const jobOptions = retryCount > 0 ? { priority: 0, weight: 0 } : {}
  if (state.clustering) {
    // Remove a job from Redis if it has not completed or failed within 60s
    // Examples: Node process terminated, client disconnected, etc.
    jobOptions.expiration = 1000 * 60
  }

  // Guarantee at least 1000ms between writes
  // GraphQL can also trigger writes
  if (isWrite || isGraphQL) {
    await state.write.key(state.id).schedule(jobOptions, noop)
  }

  // Guarantee at least 3000ms between requests that trigger notifications
  if (isWrite && state.triggersNotification(options.url)) {
    await state.notifications.key(state.id).schedule(jobOptions, noop)
  }

  // Guarantee at least 2000ms between search requests
  if (isSearch) {
    await state.search.key(state.id).schedule(jobOptions, noop)
  }

  const req = state.global.key(state.id).schedule(jobOptions, request, options)
  if (isGraphQL) {
    const res = await req
    if (res.data.errors != null && res.data.errors.some((err) => err.type === 'RATE_LIMITED')) {
      const err = new Error('GraphQL Rate Limit Exceeded')
      err.headers = res.headers
      err.data = res.data
      throw err
    }
  }
  return req
}

node_modules/@octokit/plugin-throttling/package.json (generated, vendored, new file, +85)
@@ -0,0 +1,85 @@
{
  "_args": [
    [
      "@octokit/plugin-throttling@2.6.0",
      "/Users/dougtangren/code/rust/action-gh-release"
    ]
  ],
  "_from": "@octokit/plugin-throttling@2.6.0",
  "_id": "@octokit/plugin-throttling@2.6.0",
  "_inBundle": false,
  "_integrity": "sha512-E0xQrcD36sVEeBhut6j9nWX38vm/1LKMRSUqjvJ/mqGLXfHr4jYMsrR3I/nT2QC0eJL1/SKMt7zxOt7pZiFhDA==",
  "_location": "/@octokit/plugin-throttling",
  "_phantomChildren": {},
  "_requested": {
    "type": "version",
    "registry": true,
    "raw": "@octokit/plugin-throttling@2.6.0",
    "name": "@octokit/plugin-throttling",
    "escapedName": "@octokit%2fplugin-throttling",
    "scope": "@octokit",
    "rawSpec": "2.6.0",
    "saveSpec": null,
    "fetchSpec": "2.6.0"
  },
  "_requiredBy": [
    "/"
  ],
  "_resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-2.6.0.tgz",
  "_spec": "2.6.0",
  "_where": "/Users/dougtangren/code/rust/action-gh-release",
  "author": {
    "name": "Simon Grondin",
    "url": "http://github.com/SGrondin"
  },
  "bugs": {
    "url": "https://github.com/octokit/plugin-throttling.js/issues"
  },
  "dependencies": {
    "bottleneck": "^2.15.3"
  },
  "description": "Automatic rate limiting plugin for octokit",
  "devDependencies": {
    "@octokit/request": "3.0.3",
    "@octokit/rest": "^16.3.0",
    "@octokit/routes": "20.2.4",
    "chai": "^4.2.0",
    "coveralls": "^3.0.2",
    "leakage": "^0.4.0",
    "mocha": "^6.0.2",
    "nyc": "^14.0.0",
    "semantic-release": "^15.13.8",
    "standard": "^12.0.1"
  },
  "homepage": "https://github.com/octokit/plugin-throttling.js#readme",
  "license": "MIT",
  "main": "lib/index.js",
  "name": "@octokit/plugin-throttling",
  "publishConfig": {
    "access": "public",
    "tag": "latest"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/octokit/plugin-throttling.js.git"
  },
  "scripts": {
    "coverage": "nyc report --reporter=html && open coverage/index.html",
    "coverage:upload": "nyc report --reporter=text-lcov | coveralls",
    "pretest": "standard",
    "test": "nyc mocha test/integration/",
    "test:memory": "node test/memory-leakage-test"
  },
  "standard": {
    "globals": [
      "describe",
      "before",
      "beforeEach",
      "afterEach",
      "after",
      "it",
      "expect"
    ]
  },
  "version": "2.6.0"
}

node_modules/@octokit/plugin-throttling/scripts/generate-routes.js (generated, vendored, new file, +22)
@@ -0,0 +1,22 @@
/**
 * We do not want to have `@octokit/routes` as a production dependency due to
 * its huge size. We are only interested in the REST API endpoint paths that
 * trigger notifications. So instead we automatically generate a file that
 * only contains these paths when @octokit/routes has a new release.
 */
const { writeFileSync } = require('fs')

const routes = require('@octokit/routes')
const paths = []

Object.keys(routes).forEach(scope => {
  const scopeEndpoints = routes[scope]
  scopeEndpoints.forEach(endpoint => {
    if (endpoint.triggersNotification) {
      paths.push(endpoint.path)
    }
  })
})

const uniquePaths = [...new Set(paths.sort())]
writeFileSync('./lib/triggers-notification-paths.json', JSON.stringify(uniquePaths, null, 2) + '\n')

node_modules/@octokit/plugin-throttling/test/integration/events.js (generated, vendored, new file, +170)
@@ -0,0 +1,170 @@
const expect = require('chai').expect
const Octokit = require('./octokit')

describe('Events', function () {
  it('Should support non-limit 403s', async function () {
    const octokit = new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })
    let caught = false

    await octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    try {
      await octokit.request('GET /route2', {
        request: {
          responses: [{ status: 403, headers: {}, data: {} }]
        }
      })
    } catch (error) {
      expect(error.message).to.equal('Test failed request (403)')
      caught = true
    }

    expect(caught).to.equal(true)
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'END GET /route1',
      'START GET /route2'
    ])
  })

  describe('\'abuse-limit\'', function () {
    it('Should detect abuse limit and broadcast event', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(60)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'retry-after': '60' }, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })

    it('Should ensure retryAfter is a minimum of 5s', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(5)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'retry-after': '2' }, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })

    it('Should broadcast retryAfter of 5s even when the header is missing', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onAbuseLimit: (retryAfter, options) => {
            expect(retryAfter).to.equal(5)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onRateLimit: () => 1
        }
      })

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: {}, data: { message: 'You have been rate limited to prevent abuse' } }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })
  })

  describe('\'rate-limit\'', function () {
    it('Should detect rate limit exceeded and broadcast event', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onRateLimit: (retryAfter, options) => {
            expect(retryAfter).to.be.closeTo(30, 1)
            expect(options).to.include({ method: 'GET', url: '/route2' })
            expect(options.request.retryCount).to.equal(0)
            eventCount++
          },
          onAbuseLimit: () => 1
        }
      })
      const t0 = Date.now()

      await octokit.request('GET /route1', {
        request: {
          responses: [{ status: 201, headers: {}, data: {} }]
        }
      })
      try {
        await octokit.request('GET /route2', {
          request: {
            responses: [{ status: 403, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `${Math.round(t0 / 1000) + 30}` }, data: {} }]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(403)
      }

      expect(eventCount).to.equal(1)
    })
  })
})

node_modules/@octokit/plugin-throttling/test/integration/index.js (generated, vendored, new file, +291)
@@ -0,0 +1,291 @@
const Bottleneck = require('bottleneck')
const expect = require('chai').expect
const Octokit = require('./octokit')

describe('General', function () {
  it('Should be possible to disable the plugin', async function () {
    const octokit = new Octokit({ throttle: { enabled: false } })

    const req1 = octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })

    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'START GET /route2',
      'START GET /route3',
      'END GET /route1',
      'END GET /route2',
      'END GET /route3'
    ])
  })

  it('Should require the user to pass both limit handlers', function () {
    const message = 'You must pass the onAbuseLimit and onRateLimit error handlers'

    expect(() => new Octokit()).to.throw(message)
    expect(() => new Octokit({ throttle: {} })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: 5, onRateLimit: 5 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: 5, onRateLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onRateLimit: () => 1 } })).to.throw(message)
    expect(() => new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })).to.not.throw()
  })
})

describe('Github API best practices', function () {
  it('Should linearize requests', async function () {
    const octokit = new Octokit({ throttle: { onAbuseLimit: () => 1, onRateLimit: () => 1 } })
    const req1 = octokit.request('GET /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })

    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })

    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route1',
      'END GET /route1',
      'START GET /route2',
      'END GET /route2',
      'START GET /route3',
      'END GET /route3'
    ])
  })

  it('Should maintain 1000ms between mutating or GraphQL requests', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('POST /route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })
    const req4 = octokit.request('POST /graphql', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3, req4])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START POST /route1',
      'END POST /route1',
      'START POST /route3',
      'END POST /route3',
      'START POST /graphql',
      'END POST /graphql'
    ])
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    expect(octokit.__requestTimings[6] - octokit.__requestTimings[4]).to.be.closeTo(50, 20)
  })

  it('Should maintain 3000ms between requests that trigger notifications', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        notifications: new Bottleneck.Group({ minTime: 100 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /orgs/:org/invitations', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('POST /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('POST /repos/:owner/:repo/commits/:sha/comments', {
      request: {
        responses: [{ status: 302, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START POST /orgs/:org/invitations',
      'END POST /orgs/:org/invitations',
      'START POST /route2',
      'END POST /route2',
      'START POST /repos/:owner/:repo/commits/:sha/comments',
      'END POST /repos/:owner/:repo/commits/:sha/comments'
    ])
    expect(octokit.__requestTimings[5] - octokit.__requestTimings[0]).to.be.closeTo(100, 20)
  })

  it('Should match custom routes when checking notification triggers', function () {
    const plugin = require('../../lib')

    expect(plugin.triggersNotification('/abc/def')).to.equal(false)
    expect(plugin.triggersNotification('/orgs/abc/invitation')).to.equal(false)
    expect(plugin.triggersNotification('/repos/abc/releases')).to.equal(false)
    expect(plugin.triggersNotification('/repos/abc/def/pulls/5')).to.equal(false)

    expect(plugin.triggersNotification('/repos/abc/def/pulls')).to.equal(true)
    expect(plugin.triggersNotification('/repos/abc/def/pulls/5/comments')).to.equal(true)
    expect(plugin.triggersNotification('/repos/foo/bar/issues')).to.equal(true)

    expect(plugin.triggersNotification('/repos/:owner/:repo/pulls')).to.equal(true)
    expect(plugin.triggersNotification('/repos/:owner/:repo/pulls/5/comments')).to.equal(true)
    expect(plugin.triggersNotification('/repos/:foo/:bar/issues')).to.equal(true)
  })

  it('Should maintain 2000ms between search requests', async function () {
    const octokit = new Octokit({
      throttle: {
        search: new Bottleneck.Group({ minTime: 50 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('GET /search/route1', {
      request: {
        responses: [{ status: 201, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 202, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('GET /search/route3', {
      request: {
        responses: [{ status: 203, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3])
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START GET /search/route1',
      'END GET /search/route1',
      'START GET /search/route3',
      'END GET /search/route3'
    ])
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[2]).to.be.closeTo(50, 20)
  })

  it('Should optimize throughput rather than maintain ordering', async function () {
    const octokit = new Octokit({
      throttle: {
        write: new Bottleneck.Group({ minTime: 50 }),
        notifications: new Bottleneck.Group({ minTime: 150 }),
        onAbuseLimit: () => 1,
        onRateLimit: () => 1
      }
    })

    const req1 = octokit.request('POST /orgs/abc/invitations', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req2 = octokit.request('GET /route2', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req3 = octokit.request('GET /route3', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req4 = octokit.request('POST /route4', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req5 = octokit.request('POST /repos/abc/def/commits/12345/comments', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    const req6 = octokit.request('PATCH /orgs/abc/invitations', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })

    await Promise.all([req1, req2, req3, req4, req5, req6])
    await octokit.request('GET /route6', {
      request: {
        responses: [{ status: 200, headers: {}, data: {} }]
      }
    })
    expect(octokit.__requestLog).to.deep.equal([
      'START GET /route2',
      'END GET /route2',
      'START GET /route3',
      'END GET /route3',
      'START POST /orgs/abc/invitations',
      'END POST /orgs/abc/invitations',
      'START POST /route4',
      'END POST /route4',
      'START POST /repos/abc/def/commits/12345/comments',
      'END POST /repos/abc/def/commits/12345/comments',
      'START PATCH /orgs/abc/invitations',
      'END PATCH /orgs/abc/invitations',
      'START GET /route6',
      'END GET /route6'
    ])

    expect(octokit.__requestTimings[2] - octokit.__requestTimings[0]).to.be.closeTo(0, 20)
    expect(octokit.__requestTimings[4] - octokit.__requestTimings[2]).to.be.closeTo(0, 20)
    expect(octokit.__requestTimings[6] - octokit.__requestTimings[4]).to.be.closeTo(50, 20)
    expect(octokit.__requestTimings[8] - octokit.__requestTimings[6]).to.be.closeTo(100, 20)
    expect(octokit.__requestTimings[10] - octokit.__requestTimings[8]).to.be.closeTo(150, 20)
    expect(octokit.__requestTimings[12] - octokit.__requestTimings[10]).to.be.closeTo(0, 30)
  })
})

node_modules/@octokit/plugin-throttling/test/integration/octokit.js (generated, vendored, new file, +28)
@@ -0,0 +1,28 @@
const Octokit = require('@octokit/rest')
const HttpError = require('@octokit/request/lib/http-error')
const throttlingPlugin = require('../..')

module.exports = Octokit
  .plugin((octokit) => {
    octokit.__t0 = Date.now()
    octokit.__requestLog = []
    octokit.__requestTimings = []

    octokit.hook.wrap('request', async (request, options) => {
      octokit.__requestLog.push(`START ${options.method} ${options.url}`)
      octokit.__requestTimings.push(Date.now() - octokit.__t0)
      await new Promise(resolve => setTimeout(resolve, 0))

      const res = options.request.responses.shift()
      if (res.status >= 400) {
        const message = res.data.message != null ? res.data.message : `Test failed request (${res.status})`
        const error = new HttpError(message, res.status, res.headers, options)
        throw error
      } else {
        octokit.__requestLog.push(`END ${options.method} ${options.url}`)
        octokit.__requestTimings.push(Date.now() - octokit.__t0)
        return res
      }
    })
  })
  .plugin(throttlingPlugin)
193
node_modules/@octokit/plugin-throttling/test/integration/retry.js
generated
vendored
Normal file
193
node_modules/@octokit/plugin-throttling/test/integration/retry.js
generated
vendored
Normal file
@ -0,0 +1,193 @@
|
||||
const Bottleneck = require('bottleneck')
|
||||
const expect = require('chai').expect
|
||||
const Octokit = require('./octokit')
|
||||
|
||||
describe('Retry', function () {
|
||||
describe('REST', function () {
|
||||
    it('Should retry \'abuse-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          minimumAbuseRetryAfter: 0,
          retryAfterBaseValue: 50,
          onAbuseLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(eventCount + 1)
            eventCount++
            return true
          },
          onRateLimit: () => 1
        }
      })

      const res = await octokit.request('GET /route', {
        request: {
          responses: [
            { status: 403, headers: { 'retry-after': '1' }, data: { message: 'You have been rate limited to prevent abuse' } },
            { status: 200, headers: {}, data: { message: 'Success!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.include({ message: 'Success!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'END GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    })

    it('Should retry \'abuse-limit\' twice and fail', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          minimumAbuseRetryAfter: 0,
          retryAfterBaseValue: 50,
          onAbuseLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(eventCount + 1)
            eventCount++
            return true
          },
          onRateLimit: () => 1
        }
      })

      const message = 'You have been rate limited to prevent abuse'
      try {
        await octokit.request('GET /route', {
          request: {
            responses: [
              { status: 403, headers: { 'retry-after': '1' }, data: { message } },
              { status: 403, headers: { 'retry-after': '2' }, data: { message } },
              { status: 404, headers: { 'retry-after': '3' }, data: { message: 'Nope!' } }
            ]
          }
        })
        throw new Error('Should not reach this point')
      } catch (error) {
        expect(error.status).to.equal(404)
        expect(error.message).to.equal('Nope!')
      }

      expect(eventCount).to.equal(2)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'START GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
      expect(octokit.__requestTimings[2] - octokit.__requestTimings[1]).to.be.closeTo(100, 20)
    })

    it('Should retry \'rate-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          onRateLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'GET', url: '/route' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(0)
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('GET /route', {
        request: {
          responses: [
            { status: 403, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: {} },
            { status: 202, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(202)
      expect(res.data).to.include({ message: 'Yay!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START GET /route',
        'START GET /route',
        'END GET /route'
      ])
      expect(octokit.__requestTimings[1] - octokit.__requestTimings[0]).to.be.closeTo(0, 20)
    })
  })

  describe('GraphQL', function () {
    it('Should retry \'rate-limit\' and succeed', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          write: new Bottleneck.Group({ minTime: 50 }),
          onRateLimit: (retryAfter, options) => {
            expect(options).to.include({ method: 'POST', url: '/graphql' })
            expect(options.request.retryCount).to.equal(eventCount)
            expect(retryAfter).to.equal(0)
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('POST /graphql', {
        request: {
          responses: [
            { status: 200, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: { errors: [{ type: 'RATE_LIMITED' }] } },
            { status: 200, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.include({ message: 'Yay!' })
      expect(eventCount).to.equal(1)
      expect(octokit.__requestLog).to.deep.equal([
        'START POST /graphql',
        'END POST /graphql',
        'START POST /graphql',
        'END POST /graphql'
      ])
      expect(octokit.__requestTimings[2] - octokit.__requestTimings[0]).to.be.closeTo(50, 20)
    })

    it('Should ignore other error types', async function () {
      let eventCount = 0
      const octokit = new Octokit({
        throttle: {
          write: new Bottleneck.Group({ minTime: 50 }),
          onRateLimit: (retryAfter, options) => {
            eventCount++
            return true
          },
          onAbuseLimit: () => 1
        }
      })

      const res = await octokit.request('POST /graphql', {
        request: {
          responses: [
            { status: 200, headers: { 'x-ratelimit-remaining': '0', 'x-ratelimit-reset': `123` }, data: { errors: [{ type: 'HELLO_WORLD' }] } },
            { status: 200, headers: {}, data: { message: 'Yay!' } }
          ]
        }
      })

      expect(res.status).to.equal(200)
      expect(res.data).to.deep.equal({ errors: [ { type: 'HELLO_WORLD' } ] })
      expect(eventCount).to.equal(0)
      expect(octokit.__requestLog).to.deep.equal([
        'START POST /graphql',
        'END POST /graphql'
      ])
    })
  })
})
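The tests above exercise the plugin's two throttle hooks: `onRateLimit` fires when the hourly quota is exhausted, `onAbuseLimit` fires on abuse-detection 403s, and a truthy return value asks the plugin to retry. A minimal consumer-side sketch of the same hooks follows; the request and the one-retry policy are illustrative, not part of this diff:

const Octokit = require('@octokit/rest')
  .plugin(require('@octokit/plugin-throttling'))

const octokit = new Octokit({
  throttle: {
    onRateLimit: (retryAfter, options) => {
      // Retry once when the hourly quota is exhausted, then give up.
      return options.request.retryCount === 0
    },
    onAbuseLimit: (retryAfter, options) => {
      // Abuse-limit 403s carry a Retry-After header; returning false skips the retry.
      console.warn(`Abuse limit hit for ${options.method} ${options.url}`)
      return false
    }
  }
})

// Illustrative request; every REST and GraphQL call goes through the same limiters.
octokit.request('GET /rate_limit').then(res => console.log(res.data))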
14
node_modules/@octokit/plugin-throttling/test/memory-leakage-test.js
generated
vendored
Normal file
@ -0,0 +1,14 @@
const { iterate } = require('leakage')
const Octokit = require('@octokit/rest')
  .plugin(require('..'))

const result = iterate(() => {
  Octokit({
    throttle: {
      onAbuseLimit: () => {},
      onRateLimit: () => {}
    }
  })
})

result.printSummary()
5
node_modules/bottleneck/.babelrc.es5
generated
vendored
Normal file
@ -0,0 +1,5 @@
{
  "presets": [
    ["@babel/preset-env", {}]
  ]
}
9
node_modules/bottleneck/.babelrc.lib
generated
vendored
Normal file
@ -0,0 +1,9 @@
{
  "presets": [
    ["@babel/preset-env", {
      "targets": {
        "node": "6.0"
      }
    }]
  ]
}
2
node_modules/bottleneck/.env
generated
vendored
Normal file
@ -0,0 +1,2 @@
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
25
node_modules/bottleneck/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,25 @@
language: node_js
node_js:
  - 8
branches:
  only:
    - master
    - next
services:
  - redis-server
env:
  global:
    - "REDIS_HOST=127.0.0.1"
    - "REDIS_PORT=6379"
cache:
  directories:
    - $HOME/.npm
install:
  - npm i
sudo: required
after_success: npx codecov --file=./coverage/lcov.info
script: npm run test-all

before_install:
  - npm i -g npm@5.10
  - npm --version
20
node_modules/bottleneck/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Grondin

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1027
node_modules/bottleneck/README.md
generated
vendored
Normal file
File diff suppressed because it is too large
629
node_modules/bottleneck/bottleneck.d.ts
generated
vendored
Normal file
@ -0,0 +1,629 @@
declare module "bottleneck" {
  namespace Bottleneck {
    type ConstructorOptions = {
      /**
       * How many jobs can be running at the same time.
       */
      readonly maxConcurrent?: number | null;
      /**
       * How long to wait after launching a job before launching another one.
       */
      readonly minTime?: number | null;
      /**
       * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
       */
      readonly highWater?: number | null;
      /**
       * Which strategy to use if the queue gets longer than the high water mark.
       */
      readonly strategy?: Bottleneck.Strategy | null;
      /**
       * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
       */
      readonly penalty?: number | null;
      /**
       * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
       */
      readonly reservoir?: number | null;
      /**
       * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
       */
      readonly reservoirRefreshInterval?: number | null;
      /**
       * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
       */
      readonly reservoirRefreshAmount?: number | null;
      /**
       * The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
       */
      readonly reservoirIncreaseAmount?: number | null;
      /**
       * Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
       */
      readonly reservoirIncreaseInterval?: number | null;
      /**
       * The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
       */
      readonly reservoirIncreaseMaximum?: number | null;
      /**
       * Optional identifier
       */
      readonly id?: string | null;
      /**
       * Set to true to leave your failed jobs hanging instead of failing them.
       */
      readonly rejectOnDrop?: boolean | null;
      /**
       * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
       */
      readonly trackDoneStatus?: boolean | null;
      /**
       * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
       */
      readonly datastore?: string | null;
      /**
       * Override the Promise library used by Bottleneck.
       */
      readonly Promise?: any;
      /**
       * This object is passed directly to the redis client library you've selected.
       */
      readonly clientOptions?: any;
      /**
       * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
       */
      readonly clusterNodes?: any;
      /**
       * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use.
       * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
       */
      /**
       * Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
       */
      readonly Redis?: any;
      /**
       * Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`.
       */
      readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
      /**
       * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
       */
      readonly clearDatastore?: boolean | null;
      /**
       * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
       */
      readonly timeout?: number | null;

      [propName: string]: any;
    };
    type JobOptions = {
      /**
       * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
       */
      readonly priority?: number | null;
      /**
       * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
       */
      readonly weight?: number | null;
      /**
       * The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
       */
      readonly expiration?: number | null;
      /**
       * Optional identifier, helps with debug output.
       */
      readonly id?: string | null;
    };
    type StopOptions = {
      /**
       * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
       */
      readonly dropWaitingJobs?: boolean | null;
      /**
       * The error message used to drop jobs when `dropWaitingJobs` is `true`.
       */
      readonly dropErrorMessage?: string | null;
      /**
       * The error message used to reject a job added to the limiter after `stop()` has been called.
       */
      readonly enqueueErrorMessage?: string | null;
    };
    type Callback<T> = (err: any, result: T) => void;
    type ClientsList = { client?: any; subscriber?: any };
    type GroupLimiterPair = { key: string; limiter: Bottleneck };
    interface Strategy {}

    type EventInfo = {
      readonly args: any[];
      readonly options: {
        readonly id: string;
        readonly priority: number;
        readonly weight: number;
        readonly expiration?: number;
      };
    };
    type EventInfoDropped = EventInfo & {
      readonly task: Function;
      readonly promise: Promise<any>;
    };
    type EventInfoQueued = EventInfo & {
      readonly reachedHWM: boolean;
      readonly blocked: boolean;
    };
    type EventInfoRetryable = EventInfo & { readonly retryCount: number; };

    enum Status {
      RECEIVED = "RECEIVED",
      QUEUED = "QUEUED",
      RUNNING = "RUNNING",
      EXECUTING = "EXECUTING",
      DONE = "DONE"
    }
    type Counts = {
      RECEIVED: number,
      QUEUED: number,
      RUNNING: number,
      EXECUTING: number,
      DONE?: number
    };

    type RedisConnectionOptions = {
      /**
       * This object is passed directly to NodeRedis' createClient() method.
       */
      readonly clientOptions?: any;
      /**
       * An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
       */
      readonly client?: any;
      /**
       * Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
       */
      readonly Redis?: any;
    };

    type IORedisConnectionOptions = {
      /**
       * This object is passed directly to ioredis' constructor method.
       */
      readonly clientOptions?: any;
      /**
       * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
       */
      readonly clusterNodes?: any;
      /**
       * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
       */
      readonly client?: any;
      /**
       * Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
       */
      readonly Redis?: any;
    };

    type BatcherOptions = {
      /**
       * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
       */
      readonly maxTime?: number | null;
      /**
       * Maximum number of requests in a batch.
       */
      readonly maxSize?: number | null;
    };

    class BottleneckError extends Error {
    }

    class RedisConnection {
      constructor(options?: Bottleneck.RedisConnectionOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: "error", fn: (error: any) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: "error", fn: (error: any) => void): void;

      /**
       * Waits until the connection is ready and returns the raw Node_Redis clients.
       */
      ready(): Promise<ClientsList>;

      /**
       * Close the redis clients.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;
    }

    class IORedisConnection {
      constructor(options?: Bottleneck.IORedisConnectionOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: "error", fn: (error: any) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: "error", fn: (error: any) => void): void;

      /**
       * Waits until the connection is ready and returns the raw ioredis clients.
       */
      ready(): Promise<ClientsList>;

      /**
       * Close the redis clients.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;
    }

    class Batcher {
      constructor(options?: Bottleneck.BatcherOptions);

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;
      on(name: "batch", fn: (batch: any[]) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;
      once(name: "batch", fn: (batch: any[]) => void): void;

      /**
       * Add a request to the Batcher. Batches are flushed to the "batch" event.
       */
      add(data: any): Promise<void>;
    }

    class Group {
      constructor(options?: Bottleneck.ConstructorOptions);

      id: string;
      datastore: string;
      connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;

      /**
       * Returns the limiter for the specified key.
       * @param str - The limiter key.
       */
      key(str: string): Bottleneck;

      /**
       * Register an event listener.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      on(name: string, fn: Function): void;
      on(name: "error", fn: (error: any) => void): void;
      on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;

      /**
       * Register an event listener for one event only.
       * @param name - The event name.
       * @param fn - The callback function.
       */
      once(name: string, fn: Function): void;
      once(name: "error", fn: (error: any) => void): void;
      once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;

      /**
       * Removes all registered event listeners.
       * @param name - The optional event name to remove listeners from.
       */
      removeAllListeners(name?: string): void;

      /**
       * Updates the group settings.
       * @param options - The new settings.
       */
      updateSettings(options: Bottleneck.ConstructorOptions): void;

      /**
       * Deletes the limiter for the given key.
       * Returns true if a key was deleted.
       * @param str - The key
       */
      deleteKey(str: string): Promise<boolean>;

      /**
       * Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
       * @param flush - Write transient data before closing.
       */
      disconnect(flush?: boolean): Promise<void>;

      /**
       * Returns all the key-limiter pairs.
       */
      limiters(): Bottleneck.GroupLimiterPair[];

      /**
       * Returns all Group keys in the local instance
       */
      keys(): string[];

      /**
       * Returns all Group keys in the Cluster
       */
      clusterKeys(): Promise<string[]>;
    }

    class Events {
      constructor(object: Object);

      /**
       * Returns the number of limiters for the event name
       * @param name - The event name.
       */
      listenerCount(name: string): number;

      /**
       * Returns a promise with the first non-null/non-undefined result from a listener
       * @param name - The event name.
       * @param args - The arguments to pass to the event listeners.
       */
      trigger(name: string, ...args: any[]): Promise<any>;
    }
  }

  class Bottleneck {
    public static readonly strategy: {
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
       */
      readonly LEAK: Bottleneck.Strategy;
      /**
       * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
       */
      readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
       */
      readonly OVERFLOW: Bottleneck.Strategy;
      /**
       * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
       */
      readonly BLOCK: Bottleneck.Strategy;
    };

    constructor(options?: Bottleneck.ConstructorOptions);

    id: string;
    datastore: string;
    connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;

    /**
     * Returns a promise which will be resolved once the limiter is ready to accept jobs
     * or rejected if it fails to start up.
     */
    ready(): Promise<any>;

    /**
     * Returns a datastore-specific object of redis clients.
     */
    clients(): Bottleneck.ClientsList;

    /**
     * Returns the name of the Redis pubsub channel used for this limiter
     */
    channel(): string;

    /**
     * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
     * @param flush - Write transient data before closing.
     */
    disconnect(flush?: boolean): Promise<void>;

    /**
     * Broadcast a string to every limiter in the Cluster.
     */
    publish(message: string): Promise<void>;

    /**
     * Returns an object with the current number of jobs per status.
     */
    counts(): Bottleneck.Counts;

    /**
     * Returns the status of the job with the provided job id.
     */
    jobStatus(id: string): Bottleneck.Status;

    /**
     * Returns the ids of all jobs with the specified status.
     */
    jobs(status?: Bottleneck.Status): string[];

    /**
     * Returns the number of requests queued.
     * @param priority - Returns the number of requests queued with the specified priority.
     */
    queued(priority?: number): number;

    /**
     * Returns the number of requests queued across the Cluster.
     */
    clusterQueued(): Promise<number>;

    /**
     * Returns whether there are any jobs currently in the queue or in the process of being added to the queue.
     */
    empty(): boolean;

    /**
     * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
     */
    running(): Promise<number>;

    /**
     * Returns the total weight of jobs in a DONE state in the Cluster.
     */
    done(): Promise<number>;

    /**
     * If a request was added right now, would it be run immediately?
     * @param weight - The weight of the request
     */
    check(weight?: number): Promise<boolean>;

    /**
     * Register an event listener.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    on(name: "error", fn: (error: any) => void): void;
    on(name: "empty", fn: () => void): void;
    on(name: "idle", fn: () => void): void;
    on(name: "depleted", fn: (empty: boolean) => void): void;
    on(name: "message", fn: (message: string) => void): void;
    on(name: "debug", fn: (message: string, info: any) => void): void;
    on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
    on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
    on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
    on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
    on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
    on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
    on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
    on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;

    /**
     * Register an event listener for one event only.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    once(name: "error", fn: (error: any) => void): void;
    once(name: "empty", fn: () => void): void;
    once(name: "idle", fn: () => void): void;
    once(name: "depleted", fn: (empty: boolean) => void): void;
    once(name: "message", fn: (message: string) => void): void;
    once(name: "debug", fn: (message: string, info: any) => void): void;
    once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
    once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
    once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
    once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
    once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
    once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
    once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
    once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;

    /**
     * Removes all registered event listeners.
     * @param name - The optional event name to remove listeners from.
     */
    removeAllListeners(name?: string): void;

    /**
     * Changes the settings for future requests.
     * @param options - The new settings.
     */
    updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;

    /**
     * Adds to the reservoir count and returns the new value.
     */
    incrementReservoir(incrementBy: number): Promise<number>;

    /**
     * The `stop()` method is used to safely shut down a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete.
     */
    stop(options?: Bottleneck.StopOptions): Promise<void>;

    /**
     * Returns the current reservoir count, if any.
     */
    currentReservoir(): Promise<number | null>;

    /**
     * Chain this limiter to another.
     * @param limiter - The limiter that requests to this limiter must also follow.
     */
    chain(limiter?: Bottleneck): Bottleneck;

    wrap<R>(fn: () => PromiseLike<R>): (() => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions) => Promise<R>; };
    wrap<R, A1>(fn: (arg1: A1) => PromiseLike<R>): ((arg1: A1) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1) => Promise<R>; };
    wrap<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>): ((arg1: A1, arg2: A2) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2) => Promise<R>; };
    wrap<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3) => Promise<R>; };
    wrap<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>; };
    wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>; };

    submit<R>(fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
    submit<R, A1>(fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2>(fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;

    submit<R>(options: Bottleneck.JobOptions, fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void;
    submit<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void;
    submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void;

    schedule<R>(fn: () => PromiseLike<R>): Promise<R>;
    schedule<R, A1>(fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
    schedule<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
    schedule<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
    schedule<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;

    schedule<R>(options: Bottleneck.JobOptions, fn: () => PromiseLike<R>): Promise<R>;
    schedule<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>;
    schedule<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>;
    schedule<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>;
    schedule<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>;
    schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>;
  }

  export default Bottleneck;
}
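The declarations above cover Bottleneck's core limiter API. A minimal usage sketch under those declarations follows; `fetchSomething` is a hypothetical promise-returning function, not part of this package:

const Bottleneck = require('bottleneck')

// At most 2 jobs in flight, and at least 250 ms between job starts.
const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 250 })

// schedule() resolves with the wrapped function's result.
limiter.schedule(() => fetchSomething('https://example.com'))
  .then(result => console.log(result))

// wrap() returns a rate-limited version of a promise-returning function.
const throttledFetch = limiter.wrap(fetchSomething)
throttledFetch('https://example.com').then(console.log)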
588
node_modules/bottleneck/bottleneck.d.ts.ejs
generated
vendored
Normal file
@ -0,0 +1,588 @@
|
||||
declare module "bottleneck" {
|
||||
namespace Bottleneck {
|
||||
type ConstructorOptions = {
|
||||
/**
|
||||
* How many jobs can be running at the same time.
|
||||
*/
|
||||
readonly maxConcurrent?: number | null;
|
||||
/**
|
||||
* How long to wait after launching a job before launching another one.
|
||||
*/
|
||||
readonly minTime?: number | null;
|
||||
/**
|
||||
* How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
|
||||
*/
|
||||
readonly highWater?: number | null;
|
||||
/**
|
||||
* Which strategy to use if the queue gets longer than the high water mark.
|
||||
*/
|
||||
readonly strategy?: Bottleneck.Strategy | null;
|
||||
/**
|
||||
* The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
|
||||
*/
|
||||
readonly penalty?: number | null;
|
||||
/**
|
||||
* How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
|
||||
*/
|
||||
readonly reservoir?: number | null;
|
||||
/**
|
||||
* Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
|
||||
*/
|
||||
readonly reservoirRefreshInterval?: number | null;
|
||||
/**
|
||||
* The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
|
||||
*/
|
||||
readonly reservoirRefreshAmount?: number | null;
|
||||
/**
|
||||
* The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
|
||||
*/
|
||||
readonly reservoirIncreaseAmount?: number | null;
|
||||
/**
|
||||
* Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
|
||||
*/
|
||||
readonly reservoirIncreaseInterval?: number | null;
|
||||
/**
|
||||
* The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
|
||||
*/
|
||||
readonly reservoirIncreaseMaximum?: number | null;
|
||||
/**
|
||||
* Optional identifier
|
||||
*/
|
||||
readonly id?: string | null;
|
||||
/**
|
||||
* Set to true to leave your failed jobs hanging instead of failing them.
|
||||
*/
|
||||
readonly rejectOnDrop?: boolean | null;
|
||||
/**
|
||||
* Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
|
||||
*/
|
||||
readonly trackDoneStatus?: boolean | null;
|
||||
/**
|
||||
* Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
|
||||
*/
|
||||
readonly datastore?: string | null;
|
||||
/**
|
||||
* Override the Promise library used by Bottleneck.
|
||||
*/
|
||||
readonly Promise?: any;
|
||||
/**
|
||||
* This object is passed directly to the redis client library you've selected.
|
||||
*/
|
||||
readonly clientOptions?: any;
|
||||
/**
|
||||
* **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
|
||||
*/
|
||||
readonly clusterNodes?: any;
|
||||
/**
|
||||
* An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use.
|
||||
* If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
|
||||
*/
|
||||
/**
|
||||
* Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
|
||||
*/
|
||||
readonly Redis?: any;
|
||||
/**
|
||||
* Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`.
|
||||
*/
|
||||
readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
|
||||
/**
|
||||
* When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
|
||||
*/
|
||||
readonly clearDatastore?: boolean | null;
|
||||
/**
|
||||
* The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
|
||||
*/
|
||||
readonly timeout?: number | null;
|
||||
|
||||
[propName: string]: any;
|
||||
};
|
||||
type JobOptions = {
|
||||
/**
|
||||
* A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
|
||||
*/
|
||||
readonly priority?: number | null;
|
||||
/**
|
||||
* Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
|
||||
*/
|
||||
readonly weight?: number | null;
|
||||
/**
|
||||
* The number milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
|
||||
*/
|
||||
readonly expiration?: number | null;
|
||||
/**
|
||||
* Optional identifier, helps with debug output.
|
||||
*/
|
||||
readonly id?: string | null;
|
||||
};
|
||||
type StopOptions = {
|
||||
/**
|
||||
* When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
|
||||
*/
|
||||
readonly dropWaitingJobs?: boolean | null;
|
||||
/**
|
||||
* The error message used to drop jobs when `dropWaitingJobs` is `true`.
|
||||
*/
|
||||
readonly dropErrorMessage?: string | null;
|
||||
/**
|
||||
* The error message used to reject a job added to the limiter after `stop()` has been called.
|
||||
*/
|
||||
readonly enqueueErrorMessage?: string | null;
|
||||
};
|
||||
type Callback<T> = (err: any, result: T) => void;
|
||||
type ClientsList = { client?: any; subscriber?: any };
|
||||
type GroupLimiterPair = { key: string; limiter: Bottleneck };
|
||||
interface Strategy {}
|
||||
|
||||
type EventInfo = {
|
||||
readonly args: any[];
|
||||
readonly options: {
|
||||
readonly id: string;
|
||||
readonly priority: number;
|
||||
readonly weight: number;
|
||||
readonly expiration?: number;
|
||||
};
|
||||
};
|
||||
type EventInfoDropped = EventInfo & {
|
||||
readonly task: Function;
|
||||
readonly promise: Promise<any>;
|
||||
};
|
||||
type EventInfoQueued = EventInfo & {
|
||||
readonly reachedHWM: boolean;
|
||||
readonly blocked: boolean;
|
||||
};
|
||||
type EventInfoRetryable = EventInfo & { readonly retryCount: number; };
|
||||
|
||||
enum Status {
|
||||
RECEIVED = "RECEIVED",
|
||||
QUEUED = "QUEUED",
|
||||
RUNNING = "RUNNING",
|
||||
EXECUTING = "EXECUTING",
|
||||
DONE = "DONE"
|
||||
}
|
||||
type Counts = {
|
||||
RECEIVED: number,
|
||||
QUEUED: number,
|
||||
RUNNING: number,
|
||||
EXECUTING: number,
|
||||
DONE?: number
|
||||
};
|
||||
|
||||
type RedisConnectionOptions = {
|
||||
/**
|
||||
* This object is passed directly to NodeRedis' createClient() method.
|
||||
*/
|
||||
readonly clientOptions?: any;
|
||||
/**
|
||||
* An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
|
||||
*/
|
||||
readonly client?: any;
|
||||
/**
|
||||
* Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
|
||||
*/
|
||||
readonly Redis?: any;
|
||||
};
|
||||
|
||||
type IORedisConnectionOptions = {
|
||||
/**
|
||||
* This object is passed directly to ioredis' constructor method.
|
||||
*/
|
||||
readonly clientOptions?: any;
|
||||
/**
|
||||
* When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
|
||||
*/
|
||||
readonly clusterNodes?: any;
|
||||
/**
|
||||
* An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
|
||||
*/
|
||||
readonly client?: any;
|
||||
/**
|
||||
* Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
|
||||
*/
|
||||
readonly Redis?: any;
|
||||
};
|
||||
|
||||
type BatcherOptions = {
|
||||
/**
|
||||
* Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
|
||||
*/
|
||||
readonly maxTime?: number | null;
|
||||
/**
|
||||
* Maximum number of requests in a batch.
|
||||
*/
|
||||
readonly maxSize?: number | null;
|
||||
};
|
||||
|
||||
class BottleneckError extends Error {
|
||||
}
|
||||
|
||||
class RedisConnection {
|
||||
constructor(options?: Bottleneck.RedisConnectionOptions);
|
||||
|
||||
/**
|
||||
* Register an event listener.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
on(name: "error", fn: (error: any) => void): void;
|
||||
|
||||
/**
|
||||
* Register an event listener for one event only.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
once(name: "error", fn: (error: any) => void): void;
|
||||
|
||||
/**
|
||||
* Waits until the connection is ready and returns the raw Node_Redis clients.
|
||||
*/
|
||||
ready(): Promise<ClientsList>;
|
||||
|
||||
/**
|
||||
* Close the redis clients.
|
||||
* @param flush - Write transient data before closing.
|
||||
*/
|
||||
disconnect(flush?: boolean): Promise<void>;
|
||||
}
|
||||
|
||||
class IORedisConnection {
|
||||
constructor(options?: Bottleneck.IORedisConnectionOptions);
|
||||
|
||||
/**
|
||||
* Register an event listener.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
on(name: "error", fn: (error: any) => void): void;
|
||||
|
||||
/**
|
||||
* Register an event listener for one event only.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
once(name: "error", fn: (error: any) => void): void;
|
||||
|
||||
/**
|
||||
* Waits until the connection is ready and returns the raw ioredis clients.
|
||||
*/
|
||||
ready(): Promise<ClientsList>;
|
||||
|
||||
/**
|
||||
* Close the redis clients.
|
||||
* @param flush - Write transient data before closing.
|
||||
*/
|
||||
disconnect(flush?: boolean): Promise<void>;
|
||||
}
|
||||
|
||||
class Batcher {
|
||||
constructor(options?: Bottleneck.BatcherOptions);
|
||||
|
||||
/**
|
||||
* Register an event listener.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
on(name: string, fn: Function): void;
|
||||
on(name: "error", fn: (error: any) => void): void;
|
||||
on(name: "batch", fn: (batch: any[]) => void): void;
|
||||
|
||||
/**
|
||||
* Register an event listener for one event only.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
once(name: string, fn: Function): void;
|
||||
once(name: "error", fn: (error: any) => void): void;
|
||||
once(name: "batch", fn: (batch: any[]) => void): void;
|
||||
|
||||
/**
|
||||
* Add a request to the Batcher. Batches are flushed to the "batch" event.
|
||||
*/
|
||||
add(data: any): Promise<void>;
|
||||
}
|
||||
|
||||
class Group {
|
||||
constructor(options?: Bottleneck.ConstructorOptions);
|
||||
|
||||
id: string;
|
||||
datastore: string;
|
||||
connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
|
||||
|
||||
/**
|
||||
* Returns the limiter for the specified key.
|
||||
* @param str - The limiter key.
|
||||
*/
|
||||
key(str: string): Bottleneck;
|
||||
|
||||
/**
|
||||
* Register an event listener.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
on(name: string, fn: Function): void;
|
||||
on(name: "error", fn: (error: any) => void): void;
|
||||
on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
|
||||
|
||||
/**
|
||||
* Register an event listener for one event only.
|
||||
* @param name - The event name.
|
||||
* @param fn - The callback function.
|
||||
*/
|
||||
once(name: string, fn: Function): void;
|
||||
once(name: "error", fn: (error: any) => void): void;
|
||||
once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
|
||||
|
||||
/**
|
||||
* Removes all registered event listeners.
|
||||
* @param name - The optional event name to remove listeners from.
|
||||
*/
|
||||
removeAllListeners(name?: string): void;
|
||||
|
||||
/**
|
||||
* Updates the group settings.
|
||||
* @param options - The new settings.
|
||||
*/
|
||||
updateSettings(options: Bottleneck.ConstructorOptions): void;
|
||||
|
||||
/**
|
||||
* Deletes the limiter for the given key.
|
||||
* Returns true if a key was deleted.
|
||||
* @param str - The key
|
||||
*/
|
||||
deleteKey(str: string): Promise<boolean>;
|
||||
|
||||
/**
|
||||
* Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
|
||||
* @param flush - Write transient data before closing.
|
||||
*/
|
||||
disconnect(flush?: boolean): Promise<void>;
|
||||
|
||||
/**
|
||||
* Returns all the key-limiter pairs.
|
||||
*/
|
||||
limiters(): Bottleneck.GroupLimiterPair[];
|
||||
|
||||
/**
|
||||
* Returns all Group keys in the local instance
|
||||
*/
|
||||
keys(): string[];
|
||||
|
||||
/**
|
||||
* Returns all Group keys in the Cluster
|
||||
*/
|
||||
clusterKeys(): Promise<string[]>;
|
||||
}
|
||||
|
||||
class Events {
|
||||
constructor(object: Object);
|
||||
|
||||
/**
|
||||
* Returns the number of limiters for the event name
|
||||
* @param name - The event name.
|
||||
*/
|
||||
listenerCount(name: string): number;
|
||||
|
||||
/**
|
||||
* Returns a promise with the first non-null/non-undefined result from a listener
|
||||
* @param name - The event name.
|
||||
* @param args - The arguments to pass to the event listeners.
|
||||
*/
|
||||
trigger(name: string, ...args: any[]): Promise<any>;
|
||||
}
|
||||
}
|
||||
|
||||
class Bottleneck {
|
||||
public static readonly strategy: {
|
||||
/**
|
||||
* When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
|
||||
*/
|
||||
readonly LEAK: Bottleneck.Strategy;
|
||||
/**
|
||||
* Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
|
||||
*/
|
||||
readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
|
||||
/**
|
||||
* When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
|
||||
*/
|
||||
readonly OVERFLOW: Bottleneck.Strategy;
|
||||
/**
|
||||
* When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
|
||||
*/
|
||||
readonly BLOCK: Bottleneck.Strategy;
|
||||
};

    constructor(options?: Bottleneck.ConstructorOptions);

    id: string;
    datastore: string;
    connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;

    /**
     * Returns a promise which will be resolved once the limiter is ready to accept jobs
     * or rejected if it fails to start up.
     */
    ready(): Promise<any>;

    /**
     * Returns a datastore-specific object of redis clients.
     */
    clients(): Bottleneck.ClientsList;

    /**
     * Returns the name of the Redis pubsub channel used for this limiter
     */
    channel(): string;

    /**
     * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
     * @param flush - Write transient data before closing.
     */
    disconnect(flush?: boolean): Promise<void>;

    /**
     * Broadcast a string to every limiter in the Cluster.
     */
    publish(message: string): Promise<void>;

    /**
     * Returns an object with the current number of jobs per status.
     */
    counts(): Bottleneck.Counts;

    /**
     * Returns the status of the job with the provided job id.
     */
    jobStatus(id: string): Bottleneck.Status;

    /**
     * Returns the ids of all the jobs with the specified status.
     */
    jobs(status?: Bottleneck.Status): string[];

    /**
     * Returns the number of requests queued.
     * @param priority - Returns the number of requests queued with the specified priority.
     */
    queued(priority?: number): number;

    /**
     * Returns the number of requests queued across the Cluster.
     */
    clusterQueued(): Promise<number>;

    /**
     * Returns whether there are any jobs currently in the queue or in the process of being added to the queue.
     */
    empty(): boolean;

    /**
     * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
     */
    running(): Promise<number>;

    /**
     * Returns the total weight of jobs in a DONE state in the Cluster.
     */
    done(): Promise<number>;

    /**
     * If a request was added right now, would it be run immediately?
     * @param weight - The weight of the request
     */
    check(weight?: number): Promise<boolean>;

    /**
     * Register an event listener.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    on(name: "error", fn: (error: any) => void): void;
    on(name: "empty", fn: () => void): void;
    on(name: "idle", fn: () => void): void;
    on(name: "depleted", fn: (empty: boolean) => void): void;
    on(name: "message", fn: (message: string) => void): void;
    on(name: "debug", fn: (message: string, info: any) => void): void;
    on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
    on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
    on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
    on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
    on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
    on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
    on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
    on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;

    /**
     * Register an event listener for one event only.
     * @param name - The event name.
     * @param fn - The callback function.
     */
    once(name: "error", fn: (error: any) => void): void;
    once(name: "empty", fn: () => void): void;
    once(name: "idle", fn: () => void): void;
    once(name: "depleted", fn: (empty: boolean) => void): void;
    once(name: "message", fn: (message: string) => void): void;
    once(name: "debug", fn: (message: string, info: any) => void): void;
    once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
    once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
    once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
    once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
    once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
    once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void;
    once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
    once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;

    /**
     * Removes all registered event listeners.
     * @param name - The optional event name to remove listeners from.
     */
    removeAllListeners(name?: string): void;

    /**
     * Changes the settings for future requests.
     * @param options - The new settings.
     */
    updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;

    /**
     * Adds to the reservoir count and returns the new value.
     */
    incrementReservoir(incrementBy: number): Promise<number>;

    /**
     * The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete.
     */
    stop(options?: Bottleneck.StopOptions): Promise<void>;

    /**
     * Returns the current reservoir count, if any.
     */
    currentReservoir(): Promise<number | null>;

    /**
     * Chain this limiter to another.
     * @param limiter - The limiter that requests to this limiter must also follow.
     */
    chain(limiter?: Bottleneck): Bottleneck;

    <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
    wrap<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R>): ((<%_ for (var idx = 1; idx <= count; idx++) { _%><%_ if (idx > 1) { %>, <% } %>arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>; };
    <%_ } _%>

    <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
    submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void;
    <%_ } _%>

    <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
    submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void;
    <%_ } _%>

    <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
    schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>;
    <%_ } _%>

    <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
    schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>;
    <%_ } _%>
  }

  export default Bottleneck;
}
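The EJS loops above expand into typed overloads of `wrap`, `submit`, and `schedule` for up to ten arguments. A minimal sketch of the two promise-based entry points (`fetchUser` is a placeholder for any promise-returning function):

```js
const Bottleneck = require("bottleneck");

const limiter = new Bottleneck({ maxConcurrent: 1, minTime: 333 });

// Placeholder task; any function returning a promise works.
async function fetchUser(id) {
  return { id };
}

// schedule() runs one call through the limiter and returns its promise.
limiter.schedule(fetchUser, 1).then(console.log);

// wrap() returns a rate-limited version of the function itself;
// per-job options go through the withOptions property it adds.
const throttledFetch = limiter.wrap(fetchUser);
throttledFetch(2).then(console.log);
throttledFetch.withOptions({ priority: 1, id: "fetch-3" }, 3).then(console.log);
```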
30
node_modules/bottleneck/bower.json
generated
vendored
Normal file
@@ -0,0 +1,30 @@
{
  "name": "bottleneck",
  "main": "bottleneck.js",
  "version": "2.19.5",
  "homepage": "https://github.com/SGrondin/bottleneck",
  "authors": [
    "SGrondin <github@simongrondin.name>"
  ],
  "description": "Distributed task scheduler and rate limiter",
  "moduleType": [
    "globals",
    "node"
  ],
  "keywords": [
    "async",
    "rate",
    "limiter",
    "limiting",
    "throttle",
    "throttling",
    "load",
    "ddos"
  ],
  "license": "MIT",
  "ignore": [
    "**/.*",
    "node_modules",
    "bower_components"
  ]
}
5064
node_modules/bottleneck/es5.js
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
66
node_modules/bottleneck/lib/Batcher.js
generated
vendored
Normal file
@@ -0,0 +1,66 @@
"use strict";

var Batcher, Events, parser;
parser = require("./parser");
Events = require("./Events");

Batcher = function () {
  class Batcher {
    constructor(options = {}) {
      this.options = options;
      parser.load(this.options, this.defaults, this);
      this.Events = new Events(this);
      this._arr = [];

      this._resetPromise();

      this._lastFlush = Date.now();
    }

    _resetPromise() {
      return this._promise = new this.Promise((res, rej) => {
        return this._resolve = res;
      });
    }

    _flush() {
      clearTimeout(this._timeout);
      this._lastFlush = Date.now();

      this._resolve();

      this.Events.trigger("batch", this._arr);
      this._arr = [];
      return this._resetPromise();
    }

    add(data) {
      var ret;

      this._arr.push(data);

      ret = this._promise;

      if (this._arr.length === this.maxSize) {
        this._flush();
      } else if (this.maxTime != null && this._arr.length === 1) {
        this._timeout = setTimeout(() => {
          return this._flush();
        }, this.maxTime);
      }

      return ret;
    }

  }

  ;
  Batcher.prototype.defaults = {
    maxTime: null,
    maxSize: null,
    Promise: Promise
  };
  return Batcher;
}.call(void 0);

module.exports = Batcher;
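`add()` returns a promise that resolves at the next flush, which happens once `maxSize` items have accumulated or `maxTime` milliseconds after the first queued item, whichever comes first; the batch itself is delivered through the `"batch"` event. A minimal sketch:

```js
const Bottleneck = require("bottleneck");

// Flush after 10 items, or 1000 ms after the first queued item.
const batcher = new Bottleneck.Batcher({ maxTime: 1000, maxSize: 10 });

batcher.on("batch", (items) => {
  console.log("flushing", items.length, "items");
});

// Resolves once the batch containing this item has been flushed.
batcher.add({ id: 1 }).then(() => console.log("item 1 flushed"));
```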
594
node_modules/bottleneck/lib/Bottleneck.js
generated
vendored
Normal file
@@ -0,0 +1,594 @@
"use strict";

function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }

function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }

function _toArray(arr) { return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest(); }

function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }

function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); }

function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var Bottleneck,
    DEFAULT_PRIORITY,
    Events,
    Job,
    LocalDatastore,
    NUM_PRIORITIES,
    Queues,
    RedisDatastore,
    States,
    Sync,
    parser,
    splice = [].splice;
NUM_PRIORITIES = 10;
DEFAULT_PRIORITY = 5;
parser = require("./parser");
Queues = require("./Queues");
Job = require("./Job");
LocalDatastore = require("./LocalDatastore");
RedisDatastore = require("./RedisDatastore");
Events = require("./Events");
States = require("./States");
Sync = require("./Sync");

Bottleneck = function () {
  class Bottleneck {
    constructor(options = {}, ...invalid) {
      var storeInstanceOptions, storeOptions;
      this._addToQueue = this._addToQueue.bind(this);

      this._validateOptions(options, invalid);

      parser.load(options, this.instanceDefaults, this);
      this._queues = new Queues(NUM_PRIORITIES);
      this._scheduled = {};
      this._states = new States(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : []));
      this._limiter = null;
      this.Events = new Events(this);
      this._submitLock = new Sync("submit", this.Promise);
      this._registerLock = new Sync("register", this.Promise);
      storeOptions = parser.load(options, this.storeDefaults, {});

      this._store = function () {
        if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) {
          storeInstanceOptions = parser.load(options, this.redisStoreDefaults, {});
          return new RedisDatastore(this, storeOptions, storeInstanceOptions);
        } else if (this.datastore === "local") {
          storeInstanceOptions = parser.load(options, this.localStoreDefaults, {});
          return new LocalDatastore(this, storeOptions, storeInstanceOptions);
        } else {
          throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`);
        }
      }.call(this);

      this._queues.on("leftzero", () => {
        var ref;
        return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0;
      });

      this._queues.on("zero", () => {
        var ref;
        return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0;
      });
    }

    _validateOptions(options, invalid) {
      if (!(options != null && typeof options === "object" && invalid.length === 0)) {
        throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1.");
      }
    }

    ready() {
      return this._store.ready;
    }

    clients() {
      return this._store.clients;
    }

    channel() {
      return `b_${this.id}`;
    }

    channel_client() {
      return `b_${this.id}_${this._store.clientId}`;
    }

    publish(message) {
      return this._store.__publish__(message);
    }

    disconnect(flush = true) {
      return this._store.__disconnect__(flush);
    }

    chain(_limiter) {
      this._limiter = _limiter;
      return this;
    }

    queued(priority) {
      return this._queues.queued(priority);
    }

    clusterQueued() {
      return this._store.__queued__();
    }

    empty() {
      return this.queued() === 0 && this._submitLock.isEmpty();
    }

    running() {
      return this._store.__running__();
    }

    done() {
      return this._store.__done__();
    }

    jobStatus(id) {
      return this._states.jobStatus(id);
    }

    jobs(status) {
      return this._states.statusJobs(status);
    }

    counts() {
      return this._states.statusCounts();
    }

    _randomIndex() {
      return Math.random().toString(36).slice(2);
    }

    check(weight = 1) {
      return this._store.__check__(weight);
    }

    _clearGlobalState(index) {
      if (this._scheduled[index] != null) {
        clearTimeout(this._scheduled[index].expiration);
        delete this._scheduled[index];
        return true;
      } else {
        return false;
      }
    }

    _free(index, job, options, eventInfo) {
      var _this = this;

      return _asyncToGenerator(function* () {
        var e, running;

        try {
          var _ref = yield _this._store.__free__(index, options.weight);

          running = _ref.running;

          _this.Events.trigger("debug", `Freed ${options.id}`, eventInfo);

          if (running === 0 && _this.empty()) {
            return _this.Events.trigger("idle");
          }
        } catch (error1) {
          e = error1;
          return _this.Events.trigger("error", e);
        }
      })();
    }

    _run(index, job, wait) {
      var clearGlobalState, free, run;
      job.doRun();
      clearGlobalState = this._clearGlobalState.bind(this, index);
      run = this._run.bind(this, index, job);
      free = this._free.bind(this, index, job);
      return this._scheduled[index] = {
        timeout: setTimeout(() => {
          return job.doExecute(this._limiter, clearGlobalState, run, free);
        }, wait),
        expiration: job.options.expiration != null ? setTimeout(function () {
          return job.doExpire(clearGlobalState, run, free);
        }, wait + job.options.expiration) : void 0,
        job: job
      };
    }

    _drainOne(capacity) {
      return this._registerLock.schedule(() => {
        var args, index, next, options, queue;

        if (this.queued() === 0) {
          return this.Promise.resolve(null);
        }

        queue = this._queues.getFirst();

        var _next2 = next = queue.first();

        options = _next2.options;
        args = _next2.args;

        if (capacity != null && options.weight > capacity) {
          return this.Promise.resolve(null);
        }

        this.Events.trigger("debug", `Draining ${options.id}`, {
          args,
          options
        });
        index = this._randomIndex();
        return this._store.__register__(index, options.weight, options.expiration).then(({
          success,
          wait,
          reservoir
        }) => {
          var empty;
          this.Events.trigger("debug", `Drained ${options.id}`, {
            success,
            args,
            options
          });

          if (success) {
            queue.shift();
            empty = this.empty();

            if (empty) {
              this.Events.trigger("empty");
            }

            if (reservoir === 0) {
              this.Events.trigger("depleted", empty);
            }

            this._run(index, next, wait);

            return this.Promise.resolve(options.weight);
          } else {
            return this.Promise.resolve(null);
          }
        });
      });
    }

    _drainAll(capacity, total = 0) {
      return this._drainOne(capacity).then(drained => {
        var newCapacity;

        if (drained != null) {
          newCapacity = capacity != null ? capacity - drained : capacity;
          return this._drainAll(newCapacity, total + drained);
        } else {
          return this.Promise.resolve(total);
        }
      }).catch(e => {
        return this.Events.trigger("error", e);
      });
    }

    _dropAllQueued(message) {
      return this._queues.shiftAll(function (job) {
        return job.doDrop({
          message
        });
      });
    }

    stop(options = {}) {
      var done, waitForExecuting;
      options = parser.load(options, this.stopDefaults);

      waitForExecuting = at => {
        var finished;

        finished = () => {
          var counts;
          counts = this._states.counts;
          return counts[0] + counts[1] + counts[2] + counts[3] === at;
        };

        return new this.Promise((resolve, reject) => {
          if (finished()) {
            return resolve();
          } else {
            return this.on("done", () => {
              if (finished()) {
                this.removeAllListeners("done");
                return resolve();
              }
            });
          }
        });
      };

      done = options.dropWaitingJobs ? (this._run = function (index, next) {
        return next.doDrop({
          message: options.dropErrorMessage
        });
      }, this._drainOne = () => {
        return this.Promise.resolve(null);
      }, this._registerLock.schedule(() => {
        return this._submitLock.schedule(() => {
          var k, ref, v;
          ref = this._scheduled;

          for (k in ref) {
            v = ref[k];

            if (this.jobStatus(v.job.options.id) === "RUNNING") {
              clearTimeout(v.timeout);
              clearTimeout(v.expiration);
              v.job.doDrop({
                message: options.dropErrorMessage
              });
            }
          }

          this._dropAllQueued(options.dropErrorMessage);

          return waitForExecuting(0);
        });
      })) : this.schedule({
        priority: NUM_PRIORITIES - 1,
        weight: 0
      }, () => {
        return waitForExecuting(1);
      });

      this._receive = function (job) {
        return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage));
      };

      this.stop = () => {
        return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called"));
      };

      return done;
    }

    _addToQueue(job) {
      var _this2 = this;

      return _asyncToGenerator(function* () {
        var args, blocked, error, options, reachedHWM, shifted, strategy;
        args = job.args;
        options = job.options;

        try {
          var _ref2 = yield _this2._store.__submit__(_this2.queued(), options.weight);

          reachedHWM = _ref2.reachedHWM;
          blocked = _ref2.blocked;
          strategy = _ref2.strategy;
        } catch (error1) {
          error = error1;

          _this2.Events.trigger("debug", `Could not queue ${options.id}`, {
            args,
            options,
            error
          });

          job.doDrop({
            error
          });
          return false;
        }

        if (blocked) {
          job.doDrop();
          return true;
        } else if (reachedHWM) {
          shifted = strategy === Bottleneck.prototype.strategy.LEAK ? _this2._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? _this2._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0;

          if (shifted != null) {
            shifted.doDrop();
          }

          if (shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW) {
            if (shifted == null) {
              job.doDrop();
            }

            return reachedHWM;
          }
        }

        job.doQueue(reachedHWM, blocked);

        _this2._queues.push(job);

        yield _this2._drainAll();
        return reachedHWM;
      })();
    }

    _receive(job) {
      if (this._states.jobStatus(job.options.id) != null) {
        job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`));

        return false;
      } else {
        job.doReceive();
        return this._submitLock.schedule(this._addToQueue, job);
      }
    }

    submit(...args) {
      var cb, fn, job, options, ref, ref1, task;

      if (typeof args[0] === "function") {
        var _ref3, _ref4, _splice$call, _splice$call2;

        ref = args, (_ref3 = ref, _ref4 = _toArray(_ref3), fn = _ref4[0], args = _ref4.slice(1), _ref3), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call);
        options = parser.load({}, this.jobDefaults);
      } else {
        var _ref5, _ref6, _splice$call3, _splice$call4;

        ref1 = args, (_ref5 = ref1, _ref6 = _toArray(_ref5), options = _ref6[0], fn = _ref6[1], args = _ref6.slice(2), _ref5), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3);
        options = parser.load(options, this.jobDefaults);
      }

      task = (...args) => {
        return new this.Promise(function (resolve, reject) {
          return fn(...args, function (...args) {
            return (args[0] != null ? reject : resolve)(args);
          });
        });
      };

      job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
      job.promise.then(function (args) {
        return typeof cb === "function" ? cb(...args) : void 0;
      }).catch(function (args) {
        if (Array.isArray(args)) {
          return typeof cb === "function" ? cb(...args) : void 0;
        } else {
          return typeof cb === "function" ? cb(args) : void 0;
        }
      });
      return this._receive(job);
    }

    schedule(...args) {
      var job, options, task;

      if (typeof args[0] === "function") {
        var _args = args;

        var _args2 = _toArray(_args);

        task = _args2[0];
        args = _args2.slice(1);
        options = {};
      } else {
        var _args3 = args;

        var _args4 = _toArray(_args3);

        options = _args4[0];
        task = _args4[1];
        args = _args4.slice(2);
      }

      job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);

      this._receive(job);

      return job.promise;
    }

    wrap(fn) {
      var schedule, wrapped;
      schedule = this.schedule.bind(this);

      wrapped = function wrapped(...args) {
        return schedule(fn.bind(this), ...args);
      };

      wrapped.withOptions = function (options, ...args) {
        return schedule(options, fn, ...args);
      };

      return wrapped;
    }

    updateSettings(options = {}) {
      var _this3 = this;

      return _asyncToGenerator(function* () {
        yield _this3._store.__updateSettings__(parser.overwrite(options, _this3.storeDefaults));
        parser.overwrite(options, _this3.instanceDefaults, _this3);
        return _this3;
      })();
    }

    currentReservoir() {
      return this._store.__currentReservoir__();
    }

    incrementReservoir(incr = 0) {
      return this._store.__incrementReservoir__(incr);
    }

  }

  ;
  Bottleneck.default = Bottleneck;
  Bottleneck.Events = Events;
  Bottleneck.version = Bottleneck.prototype.version = require("./version.json").version;
  Bottleneck.strategy = Bottleneck.prototype.strategy = {
    LEAK: 1,
    OVERFLOW: 2,
    OVERFLOW_PRIORITY: 4,
    BLOCK: 3
  };
  Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = require("./BottleneckError");
  Bottleneck.Group = Bottleneck.prototype.Group = require("./Group");
  Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require("./RedisConnection");
  Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require("./IORedisConnection");
  Bottleneck.Batcher = Bottleneck.prototype.Batcher = require("./Batcher");
  Bottleneck.prototype.jobDefaults = {
    priority: DEFAULT_PRIORITY,
    weight: 1,
    expiration: null,
    id: "<no-id>"
  };
  Bottleneck.prototype.storeDefaults = {
    maxConcurrent: null,
    minTime: 0,
    highWater: null,
    strategy: Bottleneck.prototype.strategy.LEAK,
    penalty: null,
    reservoir: null,
    reservoirRefreshInterval: null,
    reservoirRefreshAmount: null,
    reservoirIncreaseInterval: null,
    reservoirIncreaseAmount: null,
    reservoirIncreaseMaximum: null
  };
  Bottleneck.prototype.localStoreDefaults = {
    Promise: Promise,
    timeout: null,
    heartbeatInterval: 250
  };
  Bottleneck.prototype.redisStoreDefaults = {
    Promise: Promise,
    timeout: null,
    heartbeatInterval: 5000,
    clientTimeout: 10000,
    Redis: null,
    clientOptions: {},
    clusterNodes: null,
    clearDatastore: false,
    connection: null
  };
  Bottleneck.prototype.instanceDefaults = {
    datastore: "local",
    connection: null,
    id: "<no-id>",
    rejectOnDrop: true,
    trackDoneStatus: false,
    Promise: Promise
  };
  Bottleneck.prototype.stopDefaults = {
    enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.",
    dropWaitingJobs: true,
    dropErrorMessage: "This limiter has been stopped."
  };
  return Bottleneck;
}.call(void 0);

module.exports = Bottleneck;
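`submit()` above is the callback-style counterpart of `schedule()`: the wrapped function's last argument must be a Node-style callback, and job options may optionally be passed first. A sketch:

```js
const Bottleneck = require("bottleneck");
const fs = require("fs");

const limiter = new Bottleneck({ maxConcurrent: 5 });

// Without options: submit(fn, ...args, callback)
limiter.submit(fs.readFile, "./package.json", "utf8", (err, data) => {
  if (err) return console.error(err);
  console.log("read", data.length, "chars");
});

// With options: submit(options, fn, ...args, callback)
limiter.submit({ id: "read-again", priority: 4 }, fs.readFile, "./package.json", "utf8", (err) => {
  if (err) console.error(err);
});
```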
5
node_modules/bottleneck/lib/BottleneckError.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
"use strict";

var BottleneckError;
BottleneckError = class BottleneckError extends Error {};
module.exports = BottleneckError;
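Errors raised by the library itself (dropped jobs, invalid options, stopped limiters, expirations) are instances of this class, so callers can tell them apart from errors thrown by their own tasks. A sketch:

```js
const Bottleneck = require("bottleneck");

const limiter = new Bottleneck();

limiter.schedule(() => Promise.reject(new Error("task failure")))
  .catch((err) => {
    if (err instanceof Bottleneck.BottleneckError) {
      // Raised by the limiter itself (dropped job, stopped limiter, ...).
      console.log("limiter error:", err.message);
    } else {
      console.log("task error:", err.message); // this branch runs here
    }
  });
```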
107
node_modules/bottleneck/lib/DLList.js
generated
vendored
Normal file
@@ -0,0 +1,107 @@
"use strict";

var DLList;
DLList = class DLList {
  constructor(incr, decr) {
    this.incr = incr;
    this.decr = decr;
    this._first = null;
    this._last = null;
    this.length = 0;
  }

  push(value) {
    var node;
    this.length++;

    if (typeof this.incr === "function") {
      this.incr();
    }

    node = {
      value,
      prev: this._last,
      next: null
    };

    if (this._last != null) {
      this._last.next = node;
      this._last = node;
    } else {
      this._first = this._last = node;
    }

    return void 0;
  }

  shift() {
    var value;

    if (this._first == null) {
      return;
    } else {
      this.length--;

      if (typeof this.decr === "function") {
        this.decr();
      }
    }

    value = this._first.value;

    if ((this._first = this._first.next) != null) {
      this._first.prev = null;
    } else {
      this._last = null;
    }

    return value;
  }

  first() {
    if (this._first != null) {
      return this._first.value;
    }
  }

  getArray() {
    var node, ref, results;
    node = this._first;
    results = [];

    while (node != null) {
      results.push((ref = node, node = node.next, ref.value));
    }

    return results;
  }

  forEachShift(cb) {
    var node;
    node = this.shift();

    while (node != null) {
      cb(node), node = this.shift();
    }

    return void 0;
  }

  debug() {
    var node, ref, ref1, ref2, results;
    node = this._first;
    results = [];

    while (node != null) {
      results.push((ref = node, node = node.next, {
        value: ref.value,
        prev: (ref1 = ref.prev) != null ? ref1.value : void 0,
        next: (ref2 = ref.next) != null ? ref2.value : void 0
      }));
    }

    return results;
  }

};
module.exports = DLList;
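The `incr`/`decr` hooks let the `Queues` container keep an aggregate length across its ten priority lists without walking them. A standalone sketch of this internal structure:

```js
const DLList = require("bottleneck/lib/DLList");

let total = 0;
const list = new DLList(() => total++, () => total--);

list.push("a");
list.push("b");
console.log(list.length, total); // 2 2
console.log(list.shift());       // "a"
console.log(total);              // 1
```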
128
node_modules/bottleneck/lib/Events.js
generated
vendored
Normal file
@@ -0,0 +1,128 @@
"use strict";

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var Events;
Events = class Events {
  constructor(instance) {
    this.instance = instance;
    this._events = {};

    if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) {
      throw new Error("An Emitter already exists for this object");
    }

    this.instance.on = (name, cb) => {
      return this._addListener(name, "many", cb);
    };

    this.instance.once = (name, cb) => {
      return this._addListener(name, "once", cb);
    };

    this.instance.removeAllListeners = (name = null) => {
      if (name != null) {
        return delete this._events[name];
      } else {
        return this._events = {};
      }
    };
  }

  _addListener(name, status, cb) {
    var base;

    if ((base = this._events)[name] == null) {
      base[name] = [];
    }

    this._events[name].push({
      cb,
      status
    });

    return this.instance;
  }

  listenerCount(name) {
    if (this._events[name] != null) {
      return this._events[name].length;
    } else {
      return 0;
    }
  }

  trigger(name, ...args) {
    var _this = this;

    return _asyncToGenerator(function* () {
      var e, promises;

      try {
        if (name !== "debug") {
          _this.trigger("debug", `Event triggered: ${name}`, args);
        }

        if (_this._events[name] == null) {
          return;
        }

        _this._events[name] = _this._events[name].filter(function (listener) {
          return listener.status !== "none";
        });
        promises = _this._events[name].map(
        /*#__PURE__*/
        function () {
          var _ref = _asyncToGenerator(function* (listener) {
            var e, returned;

            if (listener.status === "none") {
              return;
            }

            if (listener.status === "once") {
              listener.status = "none";
            }

            try {
              returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0;

              if (typeof (returned != null ? returned.then : void 0) === "function") {
                return yield returned;
              } else {
                return returned;
              }
            } catch (error) {
              e = error;

if ("name" !== "error") {
|
||||
_this.trigger("error", e);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
});
|
||||
|
||||
return function (_x) {
|
||||
return _ref.apply(this, arguments);
|
||||
};
|
||||
}());
|
||||
return (yield Promise.all(promises)).find(function (x) {
|
||||
return x != null;
|
||||
});
|
||||
} catch (error) {
|
||||
e = error;
|
||||
|
||||
if ("name" !== "error") {
|
||||
_this.trigger("error", e);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
};
|
||||
module.exports = Events;
|
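`trigger()` awaits every listener and resolves with the first non-null result, which is how a `"failed"` listener's return value becomes a retry delay elsewhere in the library. A standalone sketch of this internal emitter:

```js
const Events = require("bottleneck/lib/Events");

const obj = {};
const events = new Events(obj); // attaches on/once/removeAllListeners to obj

obj.on("probe", () => null);      // null results are skipped
obj.once("probe", () => "first"); // "once" listeners run a single time

events.trigger("probe").then((result) => {
  console.log(result); // "first"
});
```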
198
node_modules/bottleneck/lib/Group.js
generated
vendored
Normal file
@@ -0,0 +1,198 @@
"use strict";

function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }

function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }

function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }

function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var Events, Group, IORedisConnection, RedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
RedisConnection = require("./RedisConnection");
IORedisConnection = require("./IORedisConnection");
Scripts = require("./Scripts");

Group = function () {
  class Group {
    constructor(limiterOptions = {}) {
      this.deleteKey = this.deleteKey.bind(this);
      this.limiterOptions = limiterOptions;
      parser.load(this.limiterOptions, this.defaults, this);
      this.Events = new Events(this);
      this.instances = {};
      this.Bottleneck = require("./Bottleneck");

      this._startAutoCleanup();

      this.sharedConnection = this.connection != null;

      if (this.connection == null) {
        if (this.limiterOptions.datastore === "redis") {
          this.connection = new RedisConnection(Object.assign({}, this.limiterOptions, {
            Events: this.Events
          }));
        } else if (this.limiterOptions.datastore === "ioredis") {
          this.connection = new IORedisConnection(Object.assign({}, this.limiterOptions, {
            Events: this.Events
          }));
        }
      }
    }

    key(key = "") {
      var ref;
      return (ref = this.instances[key]) != null ? ref : (() => {
        var limiter;
        limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, {
          id: `${this.id}-${key}`,
          timeout: this.timeout,
          connection: this.connection
        }));
        this.Events.trigger("created", limiter, key);
        return limiter;
      })();
    }

    deleteKey(key = "") {
      var _this = this;

      return _asyncToGenerator(function* () {
        var deleted, instance;
        instance = _this.instances[key];

        if (_this.connection) {
          deleted = yield _this.connection.__runCommand__(['del', ...Scripts.allKeys(`${_this.id}-${key}`)]);
        }

        if (instance != null) {
          delete _this.instances[key];
          yield instance.disconnect();
        }

        return instance != null || deleted > 0;
      })();
    }

    limiters() {
      var k, ref, results, v;
      ref = this.instances;
      results = [];

      for (k in ref) {
        v = ref[k];
        results.push({
          key: k,
          limiter: v
        });
      }

      return results;
    }

    keys() {
      return Object.keys(this.instances);
    }

    clusterKeys() {
      var _this2 = this;

      return _asyncToGenerator(function* () {
        var cursor, end, found, i, k, keys, len, next, start;

        if (_this2.connection == null) {
          return _this2.Promise.resolve(_this2.keys());
        }

        keys = [];
        cursor = null;
        start = `b_${_this2.id}-`.length;
        end = "_settings".length;

        while (cursor !== 0) {
          var _ref = yield _this2.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${_this2.id}-*_settings`, "count", 10000]);

          var _ref2 = _slicedToArray(_ref, 2);

          next = _ref2[0];
          found = _ref2[1];
          cursor = ~~next;

          for (i = 0, len = found.length; i < len; i++) {
            k = found[i];
            keys.push(k.slice(start, -end));
          }
        }

        return keys;
      })();
    }

    _startAutoCleanup() {
      var _this3 = this;

      var base;
      clearInterval(this.interval);
      return typeof (base = this.interval = setInterval(
      /*#__PURE__*/
      _asyncToGenerator(function* () {
        var e, k, ref, results, time, v;
        time = Date.now();
        ref = _this3.instances;
        results = [];

        for (k in ref) {
          v = ref[k];

          try {
            if (yield v._store.__groupCheck__(time)) {
              results.push(_this3.deleteKey(k));
            } else {
              results.push(void 0);
            }
          } catch (error) {
            e = error;
            results.push(v.Events.trigger("error", e));
          }
        }

        return results;
      }), this.timeout / 2)).unref === "function" ? base.unref() : void 0;
    }

    updateSettings(options = {}) {
      parser.overwrite(options, this.defaults, this);
      parser.overwrite(options, options, this.limiterOptions);

      if (options.timeout != null) {
        return this._startAutoCleanup();
      }
    }

    disconnect(flush = true) {
      var ref;

      if (!this.sharedConnection) {
        return (ref = this.connection) != null ? ref.disconnect(flush) : void 0;
      }
    }

  }

  ;
  Group.prototype.defaults = {
    timeout: 1000 * 60 * 5,
    connection: null,
    Promise: Promise,
    id: "group-key"
  };
  return Group;
}.call(void 0);

module.exports = Group;
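A Group lazily creates one limiter per key and garbage-collects idle ones via `_startAutoCleanup()`, which is the usual pattern for per-user or per-IP rate limiting. A minimal sketch (`handleRequest` is a placeholder task):

```js
const Bottleneck = require("bottleneck");

// Each distinct key lazily gets its own limiter with these settings.
const group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 250 });

group.on("created", (limiter, key) => {
  console.log("new limiter for", key);
});

const handleRequest = async () => "done"; // placeholder task

// Two different users are throttled independently.
group.key("user-123").schedule(handleRequest);
group.key("user-456").schedule(handleRequest);
```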
186
node_modules/bottleneck/lib/IORedisConnection.js
generated
vendored
Normal file
@@ -0,0 +1,186 @@
"use strict";

function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }

function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }

function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }

function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var Events, IORedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
Scripts = require("./Scripts");

IORedisConnection = function () {
  class IORedisConnection {
    constructor(options = {}) {
      parser.load(options, this.defaults, this);

      if (this.Redis == null) {
        this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
      }

      if (this.Events == null) {
        this.Events = new Events(this);
      }

      this.terminated = false;

      if (this.clusterNodes != null) {
        this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
        this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
      } else if (this.client != null && this.client.duplicate == null) {
        this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options);
      } else {
        if (this.client == null) {
          this.client = new this.Redis(this.clientOptions);
        }

        this.subscriber = this.client.duplicate();
      }

      this.limiters = {};
      this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
        this._loadScripts();

        return {
          client: this.client,
          subscriber: this.subscriber
        };
      });
    }

    _setup(client, sub) {
      client.setMaxListeners(0);
      return new this.Promise((resolve, reject) => {
        client.on("error", e => {
          return this.Events.trigger("error", e);
        });

        if (sub) {
          client.on("message", (channel, message) => {
            var ref;
            return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
          });
        }

        if (client.status === "ready") {
          return resolve();
        } else {
          return client.once("ready", resolve);
        }
      });
    }

    _loadScripts() {
      return Scripts.names.forEach(name => {
        return this.client.defineCommand(name, {
          lua: Scripts.payload(name)
        });
      });
    }

    __runCommand__(cmd) {
      var _this = this;

      return _asyncToGenerator(function* () {
        var _, deleted;

        yield _this.ready;

        var _ref = yield _this.client.pipeline([cmd]).exec();

        var _ref2 = _slicedToArray(_ref, 1);

        var _ref2$ = _slicedToArray(_ref2[0], 2);

        _ = _ref2$[0];
        deleted = _ref2$[1];
        return deleted;
      })();
    }

    __addLimiter__(instance) {
      return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
        return new this.Promise((resolve, reject) => {
          return this.subscriber.subscribe(channel, () => {
            this.limiters[channel] = instance;
            return resolve();
          });
        });
      }));
    }

    __removeLimiter__(instance) {
      var _this2 = this;

      return [instance.channel(), instance.channel_client()].forEach(
      /*#__PURE__*/
      function () {
        var _ref3 = _asyncToGenerator(function* (channel) {
          if (!_this2.terminated) {
            yield _this2.subscriber.unsubscribe(channel);
          }

          return delete _this2.limiters[channel];
        });

        return function (_x) {
          return _ref3.apply(this, arguments);
        };
      }());
    }

    __scriptArgs__(name, id, args, cb) {
      var keys;
      keys = Scripts.keys(name, id);
      return [keys.length].concat(keys, args, cb);
    }

    __scriptFn__(name) {
      return this.client[name].bind(this.client);
    }

    disconnect(flush = true) {
      var i, k, len, ref;
      ref = Object.keys(this.limiters);

      for (i = 0, len = ref.length; i < len; i++) {
        k = ref[i];
        clearInterval(this.limiters[k]._store.heartbeat);
      }

      this.limiters = {};
      this.terminated = true;

      if (flush) {
        return this.Promise.all([this.client.quit(), this.subscriber.quit()]);
      } else {
        this.client.disconnect();
        this.subscriber.disconnect();
        return this.Promise.resolve();
      }
    }

  }

  ;
  IORedisConnection.prototype.datastore = "ioredis";
  IORedisConnection.prototype.defaults = {
    Redis: null,
    clientOptions: {},
    clusterNodes: null,
    client: null,
    Promise: Promise,
    Events: null
  };
  return IORedisConnection;
}.call(void 0);

module.exports = IORedisConnection;
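A single connection can be shared by several limiters through the `connection` option, which is also why `disconnect()` on a limiter or Group skips shared connections. A sketch, assuming a reachable Redis server and the optional `ioredis` package installed:

```js
const Bottleneck = require("bottleneck");

const connection = new Bottleneck.IORedisConnection({
  clientOptions: { host: "127.0.0.1", port: 6379 }
});
connection.on("error", (err) => console.error(err));

// Both limiters reuse the same pair of redis clients.
const limiterA = new Bottleneck({ id: "a", datastore: "ioredis", connection });
const limiterB = new Bottleneck({ id: "b", datastore: "ioredis", connection });
```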
215
node_modules/bottleneck/lib/Job.js
generated
vendored
Normal file
@@ -0,0 +1,215 @@
"use strict";

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var BottleneckError, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser;
NUM_PRIORITIES = 10;
DEFAULT_PRIORITY = 5;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
Job = class Job {
  constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) {
    this.task = task;
    this.args = args;
    this.rejectOnDrop = rejectOnDrop;
    this.Events = Events;
    this._states = _states;
    this.Promise = Promise;
    this.options = parser.load(options, jobDefaults);
    this.options.priority = this._sanitizePriority(this.options.priority);

    if (this.options.id === jobDefaults.id) {
      this.options.id = `${this.options.id}-${this._randomIndex()}`;
    }

    this.promise = new this.Promise((_resolve, _reject) => {
      this._resolve = _resolve;
      this._reject = _reject;
    });
    this.retryCount = 0;
  }

  _sanitizePriority(priority) {
    var sProperty;
    sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority;

    if (sProperty < 0) {
      return 0;
    } else if (sProperty > NUM_PRIORITIES - 1) {
      return NUM_PRIORITIES - 1;
    } else {
      return sProperty;
    }
  }

  _randomIndex() {
    return Math.random().toString(36).slice(2);
  }

  doDrop({
    error,
    message = "This job has been dropped by Bottleneck"
  } = {}) {
    if (this._states.remove(this.options.id)) {
      if (this.rejectOnDrop) {
        this._reject(error != null ? error : new BottleneckError(message));
      }

      this.Events.trigger("dropped", {
        args: this.args,
        options: this.options,
        task: this.task,
        promise: this.promise
      });
      return true;
    } else {
      return false;
    }
  }

  _assertStatus(expected) {
    var status;
    status = this._states.jobStatus(this.options.id);

    if (!(status === expected || expected === "DONE" && status === null)) {
      throw new BottleneckError(`Invalid job status ${status}, expected ${expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues`);
    }
  }

  doReceive() {
    this._states.start(this.options.id);

    return this.Events.trigger("received", {
      args: this.args,
      options: this.options
    });
  }

  doQueue(reachedHWM, blocked) {
    this._assertStatus("RECEIVED");

    this._states.next(this.options.id);

    return this.Events.trigger("queued", {
      args: this.args,
      options: this.options,
      reachedHWM,
      blocked
    });
  }

  doRun() {
    if (this.retryCount === 0) {
      this._assertStatus("QUEUED");

      this._states.next(this.options.id);
    } else {
      this._assertStatus("EXECUTING");
    }

    return this.Events.trigger("scheduled", {
      args: this.args,
      options: this.options
    });
  }

  doExecute(chained, clearGlobalState, run, free) {
    var _this = this;

    return _asyncToGenerator(function* () {
      var error, eventInfo, passed;

      if (_this.retryCount === 0) {
        _this._assertStatus("RUNNING");

        _this._states.next(_this.options.id);
      } else {
        _this._assertStatus("EXECUTING");
      }

      eventInfo = {
        args: _this.args,
        options: _this.options,
        retryCount: _this.retryCount
      };

      _this.Events.trigger("executing", eventInfo);

      try {
        passed = yield chained != null ? chained.schedule(_this.options, _this.task, ..._this.args) : _this.task(..._this.args);

        if (clearGlobalState()) {
          _this.doDone(eventInfo);

          yield free(_this.options, eventInfo);

          _this._assertStatus("DONE");

          return _this._resolve(passed);
        }
      } catch (error1) {
        error = error1;
        return _this._onFailure(error, eventInfo, clearGlobalState, run, free);
      }
    })();
  }

  doExpire(clearGlobalState, run, free) {
    var error, eventInfo;

if (this._states.jobStatus(this.options.id === "RUNNING")) {
|
||||
      this._states.next(this.options.id);
    }

    this._assertStatus("EXECUTING");

    eventInfo = {
      args: this.args,
      options: this.options,
      retryCount: this.retryCount
    };
    error = new BottleneckError(`This job timed out after ${this.options.expiration} ms.`);
    return this._onFailure(error, eventInfo, clearGlobalState, run, free);
  }

  _onFailure(error, eventInfo, clearGlobalState, run, free) {
    var _this2 = this;

    return _asyncToGenerator(function* () {
      var retry, retryAfter;

      if (clearGlobalState()) {
        retry = yield _this2.Events.trigger("failed", error, eventInfo);

        if (retry != null) {
          retryAfter = ~~retry;

          _this2.Events.trigger("retry", `Retrying ${_this2.options.id} after ${retryAfter} ms`, eventInfo);

          _this2.retryCount++;
          return run(retryAfter);
        } else {
          _this2.doDone(eventInfo);

          yield free(_this2.options, eventInfo);

          _this2._assertStatus("DONE");

          return _this2._reject(error);
        }
      }
    })();
  }

  doDone(eventInfo) {
    this._assertStatus("EXECUTING");

    this._states.next(this.options.id);

    return this.Events.trigger("done", eventInfo);
  }

};
module.exports = Job;
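`_onFailure()` above implements the retry protocol: if a `"failed"` listener returns a number, the job is re-run after that many milliseconds instead of rejecting. A sketch:

```js
const Bottleneck = require("bottleneck");

const limiter = new Bottleneck({ maxConcurrent: 1 });

limiter.on("failed", (error, jobInfo) => {
  if (jobInfo.retryCount < 2) {
    return 250; // retry after 250 ms
  }
  // Returning nothing lets the job fail for good.
});

limiter.on("retry", (message) => console.log(message));

let attempts = 0;
limiter.schedule({ id: "flaky" }, () => {
  attempts++;
  return attempts < 3 ? Promise.reject(new Error("boom")) : Promise.resolve("ok");
}).then(console.log); // logs "ok" after two retries
```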
287
node_modules/bottleneck/lib/LocalDatastore.js
generated
vendored
Normal file
@ -0,0 +1,287 @@
"use strict";

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var BottleneckError, LocalDatastore, parser;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
LocalDatastore = class LocalDatastore {
  constructor(instance, storeOptions, storeInstanceOptions) {
    this.instance = instance;
    this.storeOptions = storeOptions;
    this.clientId = this.instance._randomIndex();
    parser.load(storeInstanceOptions, storeInstanceOptions, this);
    this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now();
    this._running = 0;
    this._done = 0;
    this._unblockTime = 0;
    this.ready = this.Promise.resolve();
    this.clients = {};

    this._startHeartbeat();
  }

  _startHeartbeat() {
    var base;

    if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) {
      return typeof (base = this.heartbeat = setInterval(() => {
        var amount, incr, maximum, now, reservoir;
        now = Date.now();

        if (this.storeOptions.reservoirRefreshInterval != null && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) {
          this._lastReservoirRefresh = now;
          this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount;

          this.instance._drainAll(this.computeCapacity());
        }

        if (this.storeOptions.reservoirIncreaseInterval != null && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) {
          var _this$storeOptions = this.storeOptions;
          amount = _this$storeOptions.reservoirIncreaseAmount;
          maximum = _this$storeOptions.reservoirIncreaseMaximum;
          reservoir = _this$storeOptions.reservoir;
          this._lastReservoirIncrease = now;
          incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount;

          if (incr > 0) {
            this.storeOptions.reservoir += incr;
            return this.instance._drainAll(this.computeCapacity());
          }
        }
      }, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0;
    } else {
      return clearInterval(this.heartbeat);
    }
  }

  __publish__(message) {
    var _this = this;

    return _asyncToGenerator(function* () {
      yield _this.yieldLoop();
      return _this.instance.Events.trigger("message", message.toString());
    })();
  }

  __disconnect__(flush) {
    var _this2 = this;

    return _asyncToGenerator(function* () {
      yield _this2.yieldLoop();
      clearInterval(_this2.heartbeat);
      return _this2.Promise.resolve();
    })();
  }

  yieldLoop(t = 0) {
    return new this.Promise(function (resolve, reject) {
      return setTimeout(resolve, t);
    });
  }

  computePenalty() {
    var ref;
    return (ref = this.storeOptions.penalty) != null ? ref : 15 * this.storeOptions.minTime || 5000;
  }

  __updateSettings__(options) {
    var _this3 = this;

    return _asyncToGenerator(function* () {
      yield _this3.yieldLoop();
      parser.overwrite(options, options, _this3.storeOptions);

      _this3._startHeartbeat();

      _this3.instance._drainAll(_this3.computeCapacity());

      return true;
    })();
  }

  __running__() {
    var _this4 = this;

    return _asyncToGenerator(function* () {
      yield _this4.yieldLoop();
      return _this4._running;
    })();
  }

  __queued__() {
    var _this5 = this;

    return _asyncToGenerator(function* () {
      yield _this5.yieldLoop();
      return _this5.instance.queued();
    })();
  }

  __done__() {
    var _this6 = this;

    return _asyncToGenerator(function* () {
      yield _this6.yieldLoop();
      return _this6._done;
    })();
  }

  __groupCheck__(time) {
    var _this7 = this;

    return _asyncToGenerator(function* () {
      yield _this7.yieldLoop();
      return _this7._nextRequest + _this7.timeout < time;
    })();
  }

  computeCapacity() {
    var maxConcurrent, reservoir;
    var _this$storeOptions2 = this.storeOptions;
    maxConcurrent = _this$storeOptions2.maxConcurrent;
    reservoir = _this$storeOptions2.reservoir;

    if (maxConcurrent != null && reservoir != null) {
      return Math.min(maxConcurrent - this._running, reservoir);
    } else if (maxConcurrent != null) {
      return maxConcurrent - this._running;
    } else if (reservoir != null) {
      return reservoir;
    } else {
      return null;
    }
  }

  conditionsCheck(weight) {
    var capacity;
    capacity = this.computeCapacity();
    return capacity == null || weight <= capacity;
  }

  __incrementReservoir__(incr) {
    var _this8 = this;

    return _asyncToGenerator(function* () {
      var reservoir;
      yield _this8.yieldLoop();
      reservoir = _this8.storeOptions.reservoir += incr;

      _this8.instance._drainAll(_this8.computeCapacity());

      return reservoir;
    })();
  }

  __currentReservoir__() {
    var _this9 = this;

    return _asyncToGenerator(function* () {
      yield _this9.yieldLoop();
      return _this9.storeOptions.reservoir;
    })();
  }

  isBlocked(now) {
    return this._unblockTime >= now;
  }

  check(weight, now) {
    return this.conditionsCheck(weight) && this._nextRequest - now <= 0;
  }

  __check__(weight) {
    var _this10 = this;

    return _asyncToGenerator(function* () {
      var now;
      yield _this10.yieldLoop();
      now = Date.now();
      return _this10.check(weight, now);
    })();
  }

  __register__(index, weight, expiration) {
    var _this11 = this;

    return _asyncToGenerator(function* () {
      var now, wait;
      yield _this11.yieldLoop();
      now = Date.now();

      if (_this11.conditionsCheck(weight)) {
        _this11._running += weight;

        if (_this11.storeOptions.reservoir != null) {
          _this11.storeOptions.reservoir -= weight;
        }

        wait = Math.max(_this11._nextRequest - now, 0);
        _this11._nextRequest = now + wait + _this11.storeOptions.minTime;
        return {
          success: true,
          wait,
          reservoir: _this11.storeOptions.reservoir
        };
      } else {
        return {
          success: false
        };
      }
    })();
  }

  strategyIsBlock() {
    return this.storeOptions.strategy === 3;
  }

  __submit__(queueLength, weight) {
    var _this12 = this;

    return _asyncToGenerator(function* () {
      var blocked, now, reachedHWM;
      yield _this12.yieldLoop();

      if (_this12.storeOptions.maxConcurrent != null && weight > _this12.storeOptions.maxConcurrent) {
        throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${_this12.storeOptions.maxConcurrent}`);
      }

      now = Date.now();
      reachedHWM = _this12.storeOptions.highWater != null && queueLength === _this12.storeOptions.highWater && !_this12.check(weight, now);
      blocked = _this12.strategyIsBlock() && (reachedHWM || _this12.isBlocked(now));

      if (blocked) {
        _this12._unblockTime = now + _this12.computePenalty();
        _this12._nextRequest = _this12._unblockTime + _this12.storeOptions.minTime;

        _this12.instance._dropAllQueued();
      }

      return {
        reachedHWM,
        blocked,
        strategy: _this12.storeOptions.strategy
      };
    })();
  }

  __free__(index, weight) {
    var _this13 = this;

    return _asyncToGenerator(function* () {
      yield _this13.yieldLoop();
      _this13._running -= weight;
      _this13._done += weight;

      _this13.instance._drainAll(_this13.computeCapacity());

      return {
        running: _this13._running
      };
    })();
  }

};
module.exports = LocalDatastore;
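The heartbeat in `_startHeartbeat` is what implements Bottleneck's documented reservoir refresh and increase options. A minimal sketch of the options that activate it (the numbers and `doWork` task are illustrative):

```js
const Bottleneck = require("bottleneck");

// Allow at most 100 jobs per minute; the 250 ms local heartbeat above
// tops the reservoir back up once the refresh interval has elapsed.
const limiter = new Bottleneck({
  reservoir: 100,
  reservoirRefreshAmount: 100,
  reservoirRefreshInterval: 60 * 1000
});

limiter.schedule(() => doWork()); // doWork is hypothetical
```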
77
node_modules/bottleneck/lib/Queues.js
generated
vendored
Normal file
@ -0,0 +1,77 @@
"use strict";

var DLList, Events, Queues;
DLList = require("./DLList");
Events = require("./Events");
Queues = class Queues {
  constructor(num_priorities) {
    var i;
    this.Events = new Events(this);
    this._length = 0;

    this._lists = function () {
      var j, ref, results;
      results = [];

      for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) {
        results.push(new DLList(() => {
          return this.incr();
        }, () => {
          return this.decr();
        }));
      }

      return results;
    }.call(this);
  }

  incr() {
    if (this._length++ === 0) {
      return this.Events.trigger("leftzero");
    }
  }

  decr() {
    if (--this._length === 0) {
      return this.Events.trigger("zero");
    }
  }

  push(job) {
    return this._lists[job.options.priority].push(job);
  }

  queued(priority) {
    if (priority != null) {
      return this._lists[priority].length;
    } else {
      return this._length;
    }
  }

  shiftAll(fn) {
    return this._lists.forEach(function (list) {
      return list.forEachShift(fn);
    });
  }

  getFirst(arr = this._lists) {
    var j, len, list;

    for (j = 0, len = arr.length; j < len; j++) {
      list = arr[j];

      if (list.length > 0) {
        return list;
      }
    }

    return [];
  }

  shiftLastFrom(priority) {
    return this.getFirst(this._lists.slice(priority).reverse()).shift();
  }

};
module.exports = Queues;
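`Queues` keeps one doubly-linked list per priority level and always drains the first non-empty one, which is how Bottleneck's documented `priority` job option works. A quick sketch (tasks are hypothetical):

```js
const Bottleneck = require("bottleneck");

const limiter = new Bottleneck({ maxConcurrent: 1 });

// Priorities run 0 (highest) through 9 (lowest); 5 is the default.
limiter.schedule({ priority: 9 }, () => backgroundTask()); // hypothetical
limiter.schedule({ priority: 0 }, () => urgentTask());     // hypothetical
```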
193
node_modules/bottleneck/lib/RedisConnection.js
generated
vendored
Normal file
@ -0,0 +1,193 @@
"use strict";

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var Events, RedisConnection, Scripts, parser;
parser = require("./parser");
Events = require("./Events");
Scripts = require("./Scripts");

RedisConnection = function () {
  class RedisConnection {
    constructor(options = {}) {
      parser.load(options, this.defaults, this);

      if (this.Redis == null) {
        this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
      }

      if (this.Events == null) {
        this.Events = new Events(this);
      }

      this.terminated = false;

      if (this.client == null) {
        this.client = this.Redis.createClient(this.clientOptions);
      }

      this.subscriber = this.client.duplicate();
      this.limiters = {};
      this.shas = {};
      this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
        return this._loadScripts();
      }).then(() => {
        return {
          client: this.client,
          subscriber: this.subscriber
        };
      });
    }

    _setup(client, sub) {
      client.setMaxListeners(0);
      return new this.Promise((resolve, reject) => {
        client.on("error", e => {
          return this.Events.trigger("error", e);
        });

        if (sub) {
          client.on("message", (channel, message) => {
            var ref;
            return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
          });
        }

        if (client.ready) {
          return resolve();
        } else {
          return client.once("ready", resolve);
        }
      });
    }

    _loadScript(name) {
      return new this.Promise((resolve, reject) => {
        var payload;
        payload = Scripts.payload(name);
        return this.client.multi([["script", "load", payload]]).exec((err, replies) => {
          if (err != null) {
            return reject(err);
          }

          this.shas[name] = replies[0];
          return resolve(replies[0]);
        });
      });
    }

    _loadScripts() {
      return this.Promise.all(Scripts.names.map(k => {
        return this._loadScript(k);
      }));
    }

    __runCommand__(cmd) {
      var _this = this;

      return _asyncToGenerator(function* () {
        yield _this.ready;
        return new _this.Promise((resolve, reject) => {
          return _this.client.multi([cmd]).exec_atomic(function (err, replies) {
            if (err != null) {
              return reject(err);
            } else {
              return resolve(replies[0]);
            }
          });
        });
      })();
    }

    __addLimiter__(instance) {
      return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
        return new this.Promise((resolve, reject) => {
          var handler;

          handler = chan => {
            if (chan === channel) {
              this.subscriber.removeListener("subscribe", handler);
              this.limiters[channel] = instance;
              return resolve();
            }
          };

          this.subscriber.on("subscribe", handler);
          return this.subscriber.subscribe(channel);
        });
      }));
    }

    __removeLimiter__(instance) {
      var _this2 = this;

      return this.Promise.all([instance.channel(), instance.channel_client()].map(
      /*#__PURE__*/
      function () {
        var _ref = _asyncToGenerator(function* (channel) {
          if (!_this2.terminated) {
            yield new _this2.Promise((resolve, reject) => {
              return _this2.subscriber.unsubscribe(channel, function (err, chan) {
                if (err != null) {
                  return reject(err);
                }

                if (chan === channel) {
                  return resolve();
                }
              });
            });
          }

          return delete _this2.limiters[channel];
        });

        return function (_x) {
          return _ref.apply(this, arguments);
        };
      }()));
    }

    __scriptArgs__(name, id, args, cb) {
      var keys;
      keys = Scripts.keys(name, id);
      return [this.shas[name], keys.length].concat(keys, args, cb);
    }

    __scriptFn__(name) {
      return this.client.evalsha.bind(this.client);
    }

    disconnect(flush = true) {
      var i, k, len, ref;
      ref = Object.keys(this.limiters);

      for (i = 0, len = ref.length; i < len; i++) {
        k = ref[i];
        clearInterval(this.limiters[k]._store.heartbeat);
      }

      this.limiters = {};
      this.terminated = true;
      this.client.end(flush);
      this.subscriber.end(flush);
      return this.Promise.resolve();
    }

  }

  ;
  RedisConnection.prototype.datastore = "redis";
  RedisConnection.prototype.defaults = {
    Redis: null,
    clientOptions: {},
    client: null,
    Promise: Promise,
    Events: null
  };
  return RedisConnection;
}.call(void 0);

module.exports = RedisConnection;
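A `RedisConnection` holds one command client plus one pub/sub subscriber, and can be shared by several limiters rather than each opening its own sockets; this mirrors Bottleneck's documented connection-sharing API. A sketch with illustrative ids and client options:

```js
const Bottleneck = require("bottleneck");

const connection = new Bottleneck.RedisConnection({
  clientOptions: { host: "127.0.0.1", port: 6379 }
});
connection.on("error", (err) => console.error(err));

// Both limiters reuse the same two Redis clients.
const limiterA = new Bottleneck({ id: "limiter-a", connection });
const limiterB = new Bottleneck({ id: "limiter-b", connection });
```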
352
node_modules/bottleneck/lib/RedisDatastore.js
generated
vendored
Normal file
@ -0,0 +1,352 @@
"use strict";

function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }

function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }

function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }

function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var BottleneckError, IORedisConnection, RedisConnection, RedisDatastore, parser;
parser = require("./parser");
BottleneckError = require("./BottleneckError");
RedisConnection = require("./RedisConnection");
IORedisConnection = require("./IORedisConnection");
RedisDatastore = class RedisDatastore {
  constructor(instance, storeOptions, storeInstanceOptions) {
    this.instance = instance;
    this.storeOptions = storeOptions;
    this.originalId = this.instance.id;
    this.clientId = this.instance._randomIndex();
    parser.load(storeInstanceOptions, storeInstanceOptions, this);
    this.clients = {};
    this.capacityPriorityCounters = {};
    this.sharedConnection = this.connection != null;

    if (this.connection == null) {
      this.connection = this.instance.datastore === "redis" ? new RedisConnection({
        Redis: this.Redis,
        clientOptions: this.clientOptions,
        Promise: this.Promise,
        Events: this.instance.Events
      }) : this.instance.datastore === "ioredis" ? new IORedisConnection({
        Redis: this.Redis,
        clientOptions: this.clientOptions,
        clusterNodes: this.clusterNodes,
        Promise: this.Promise,
        Events: this.instance.Events
      }) : void 0;
    }

    this.instance.connection = this.connection;
    this.instance.datastore = this.connection.datastore;
    this.ready = this.connection.ready.then(clients => {
      this.clients = clients;
      return this.runScript("init", this.prepareInitSettings(this.clearDatastore));
    }).then(() => {
      return this.connection.__addLimiter__(this.instance);
    }).then(() => {
      return this.runScript("register_client", [this.instance.queued()]);
    }).then(() => {
      var base;

      if (typeof (base = this.heartbeat = setInterval(() => {
        return this.runScript("heartbeat", []).catch(e => {
          return this.instance.Events.trigger("error", e);
        });
      }, this.heartbeatInterval)).unref === "function") {
        base.unref();
      }

      return this.clients;
    });
  }

  __publish__(message) {
    var _this = this;

    return _asyncToGenerator(function* () {
      var client;

      var _ref = yield _this.ready;

      client = _ref.client;
      return client.publish(_this.instance.channel(), `message:${message.toString()}`);
    })();
  }

  onMessage(channel, message) {
    var _this2 = this;

    return _asyncToGenerator(function* () {
      var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type;

      try {
        pos = message.indexOf(":");
        var _ref2 = [message.slice(0, pos), message.slice(pos + 1)];
        type = _ref2[0];
        data = _ref2[1];

        if (type === "capacity") {
          return yield _this2.instance._drainAll(data.length > 0 ? ~~data : void 0);
        } else if (type === "capacity-priority") {
          var _data$split = data.split(":");

          var _data$split2 = _slicedToArray(_data$split, 3);

          rawCapacity = _data$split2[0];
          priorityClient = _data$split2[1];
          counter = _data$split2[2];
          capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0;

          if (priorityClient === _this2.clientId) {
            drained = yield _this2.instance._drainAll(capacity);
            newCapacity = capacity != null ? capacity - (drained || 0) : "";
            return yield _this2.clients.client.publish(_this2.instance.channel(), `capacity-priority:${newCapacity}::${counter}`);
          } else if (priorityClient === "") {
            clearTimeout(_this2.capacityPriorityCounters[counter]);
            delete _this2.capacityPriorityCounters[counter];
            return _this2.instance._drainAll(capacity);
          } else {
            return _this2.capacityPriorityCounters[counter] = setTimeout(
            /*#__PURE__*/
            _asyncToGenerator(function* () {
              var e;

              try {
                delete _this2.capacityPriorityCounters[counter];
                yield _this2.runScript("blacklist_client", [priorityClient]);
                return yield _this2.instance._drainAll(capacity);
              } catch (error) {
                e = error;
                return _this2.instance.Events.trigger("error", e);
              }
            }), 1000);
          }
        } else if (type === "message") {
          return _this2.instance.Events.trigger("message", data);
        } else if (type === "blocked") {
          return yield _this2.instance._dropAllQueued();
        }
      } catch (error) {
        e = error;
        return _this2.instance.Events.trigger("error", e);
      }
    })();
  }

  __disconnect__(flush) {
    clearInterval(this.heartbeat);

    if (this.sharedConnection) {
      return this.connection.__removeLimiter__(this.instance);
    } else {
      return this.connection.disconnect(flush);
    }
  }

  runScript(name, args) {
    var _this3 = this;

    return _asyncToGenerator(function* () {
      if (!(name === "init" || name === "register_client")) {
        yield _this3.ready;
      }

      return new _this3.Promise((resolve, reject) => {
        var all_args, arr;
        all_args = [Date.now(), _this3.clientId].concat(args);

        _this3.instance.Events.trigger("debug", `Calling Redis script: ${name}.lua`, all_args);

        arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) {
          if (err != null) {
            return reject(err);
          }

          return resolve(replies);
        });
        return _this3.connection.__scriptFn__(name)(...arr);
      }).catch(e => {
        if (e.message === "SETTINGS_KEY_NOT_FOUND") {
          if (name === "heartbeat") {
            return _this3.Promise.resolve();
          } else {
            return _this3.runScript("init", _this3.prepareInitSettings(false)).then(() => {
              return _this3.runScript(name, args);
            });
          }
        } else if (e.message === "UNKNOWN_CLIENT") {
          return _this3.runScript("register_client", [_this3.instance.queued()]).then(() => {
            return _this3.runScript(name, args);
          });
        } else {
          return _this3.Promise.reject(e);
        }
      });
    })();
  }

  prepareArray(arr) {
    var i, len, results, x;
    results = [];

    for (i = 0, len = arr.length; i < len; i++) {
      x = arr[i];
      results.push(x != null ? x.toString() : "");
    }

    return results;
  }

  prepareObject(obj) {
    var arr, k, v;
    arr = [];

    for (k in obj) {
      v = obj[k];
      arr.push(k, v != null ? v.toString() : "");
    }

    return arr;
  }

  prepareInitSettings(clear) {
    var args;
    args = this.prepareObject(Object.assign({}, this.storeOptions, {
      id: this.originalId,
      version: this.instance.version,
      groupTimeout: this.timeout,
      clientTimeout: this.clientTimeout
    }));
    args.unshift(clear ? 1 : 0, this.instance.version);
    return args;
  }

  convertBool(b) {
    return !!b;
  }

  __updateSettings__(options) {
    var _this4 = this;

    return _asyncToGenerator(function* () {
      yield _this4.runScript("update_settings", _this4.prepareObject(options));
      return parser.overwrite(options, options, _this4.storeOptions);
    })();
  }

  __running__() {
    return this.runScript("running", []);
  }

  __queued__() {
    return this.runScript("queued", []);
  }

  __done__() {
    return this.runScript("done", []);
  }

  __groupCheck__() {
    var _this5 = this;

    return _asyncToGenerator(function* () {
      return _this5.convertBool((yield _this5.runScript("group_check", [])));
    })();
  }

  __incrementReservoir__(incr) {
    return this.runScript("increment_reservoir", [incr]);
  }

  __currentReservoir__() {
    return this.runScript("current_reservoir", []);
  }

  __check__(weight) {
    var _this6 = this;

    return _asyncToGenerator(function* () {
      return _this6.convertBool((yield _this6.runScript("check", _this6.prepareArray([weight]))));
    })();
  }

  __register__(index, weight, expiration) {
    var _this7 = this;

    return _asyncToGenerator(function* () {
      var reservoir, success, wait;

      var _ref4 = yield _this7.runScript("register", _this7.prepareArray([index, weight, expiration]));

      var _ref5 = _slicedToArray(_ref4, 3);

      success = _ref5[0];
      wait = _ref5[1];
      reservoir = _ref5[2];
      return {
        success: _this7.convertBool(success),
        wait,
        reservoir
      };
    })();
  }

  __submit__(queueLength, weight) {
    var _this8 = this;

    return _asyncToGenerator(function* () {
      var blocked, e, maxConcurrent, overweight, reachedHWM, strategy;

      try {
        var _ref6 = yield _this8.runScript("submit", _this8.prepareArray([queueLength, weight]));

        var _ref7 = _slicedToArray(_ref6, 3);

        reachedHWM = _ref7[0];
        blocked = _ref7[1];
        strategy = _ref7[2];
        return {
          reachedHWM: _this8.convertBool(reachedHWM),
          blocked: _this8.convertBool(blocked),
          strategy
        };
      } catch (error) {
        e = error;

        if (e.message.indexOf("OVERWEIGHT") === 0) {
          var _e$message$split = e.message.split(":");

          var _e$message$split2 = _slicedToArray(_e$message$split, 3);

          overweight = _e$message$split2[0];
          weight = _e$message$split2[1];
          maxConcurrent = _e$message$split2[2];
          throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${maxConcurrent}`);
        } else {
          throw e;
        }
      }
    })();
  }

  __free__(index, weight) {
    var _this9 = this;

    return _asyncToGenerator(function* () {
      var running;
      running = yield _this9.runScript("free", _this9.prepareArray([index]));
      return {
        running
      };
    })();
  }

};
module.exports = RedisDatastore;
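This datastore is what Bottleneck's documented clustering mode runs on: limiters in different processes that share an `id` and a Redis datastore enforce one global limit together. A configuration sketch (host, port, and the `id` are illustrative):

```js
const Bottleneck = require("bottleneck");

const limiter = new Bottleneck({
  id: "outbound-api",      // same id in every process that shares the limit
  datastore: "ioredis",    // or "redis"
  clearDatastore: false,   // keep existing limiter state on startup
  clientOptions: { host: "127.0.0.1", port: 6379 },
  maxConcurrent: 5,
  minTime: 200
});
```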
162
node_modules/bottleneck/lib/Scripts.js
generated
vendored
Normal file
@ -0,0 +1,162 @@
"use strict";

var headers, lua, templates;
lua = require("./lua.json");
headers = {
  refs: lua["refs.lua"],
  validate_keys: lua["validate_keys.lua"],
  validate_client: lua["validate_client.lua"],
  refresh_expiration: lua["refresh_expiration.lua"],
  process_tick: lua["process_tick.lua"],
  conditions_check: lua["conditions_check.lua"],
  get_time: lua["get_time.lua"]
};

exports.allKeys = function (id) {
  return [
  /*
  HASH
  */
  `b_${id}_settings`,
  /*
  HASH
  job index -> weight
  */
  `b_${id}_job_weights`,
  /*
  ZSET
  job index -> expiration
  */
  `b_${id}_job_expirations`,
  /*
  HASH
  job index -> client
  */
  `b_${id}_job_clients`,
  /*
  ZSET
  client -> sum running
  */
  `b_${id}_client_running`,
  /*
  HASH
  client -> num queued
  */
  `b_${id}_client_num_queued`,
  /*
  ZSET
  client -> last job registered
  */
  `b_${id}_client_last_registered`,
  /*
  ZSET
  client -> last seen
  */
  `b_${id}_client_last_seen`];
};

templates = {
  init: {
    keys: exports.allKeys,
    headers: ["process_tick"],
    refresh_expiration: true,
    code: lua["init.lua"]
  },
  group_check: {
    keys: exports.allKeys,
    headers: [],
    refresh_expiration: false,
    code: lua["group_check.lua"]
  },
  register_client: {
    keys: exports.allKeys,
    headers: ["validate_keys"],
    refresh_expiration: false,
    code: lua["register_client.lua"]
  },
  blacklist_client: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client"],
    refresh_expiration: false,
    code: lua["blacklist_client.lua"]
  },
  heartbeat: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: false,
    code: lua["heartbeat.lua"]
  },
  update_settings: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: true,
    code: lua["update_settings.lua"]
  },
  running: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: false,
    code: lua["running.lua"]
  },
  queued: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client"],
    refresh_expiration: false,
    code: lua["queued.lua"]
  },
  done: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: false,
    code: lua["done.lua"]
  },
  check: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
    refresh_expiration: false,
    code: lua["check.lua"]
  },
  submit: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
    refresh_expiration: true,
    code: lua["submit.lua"]
  },
  register: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
    refresh_expiration: true,
    code: lua["register.lua"]
  },
  free: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: true,
    code: lua["free.lua"]
  },
  current_reservoir: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: false,
    code: lua["current_reservoir.lua"]
  },
  increment_reservoir: {
    keys: exports.allKeys,
    headers: ["validate_keys", "validate_client", "process_tick"],
    refresh_expiration: true,
    code: lua["increment_reservoir.lua"]
  }
};
exports.names = Object.keys(templates);

exports.keys = function (name, id) {
  return templates[name].keys(id);
};

exports.payload = function (name) {
  var template;
  template = templates[name];
  return Array.prototype.concat(headers.refs, template.headers.map(function (h) {
    return headers[h];
  }), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n");
};
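Following directly from the code above: every script template resolves to the same eight Redis keys, namespaced by the limiter's `id`. A tiny sketch of what `keys()` yields (the deep require path and id are for illustration only; this is an internal module):

```js
const Scripts = require("bottleneck/lib/Scripts"); // internal, not public API

console.log(Scripts.keys("submit", "outbound-api"));
// [ 'b_outbound-api_settings', 'b_outbound-api_job_weights',
//   'b_outbound-api_job_expirations', 'b_outbound-api_job_clients', ... ]
```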
88
node_modules/bottleneck/lib/States.js
generated
vendored
Normal file
@ -0,0 +1,88 @@
"use strict";

var BottleneckError, States;
BottleneckError = require("./BottleneckError");
States = class States {
  constructor(status1) {
    this.status = status1;
    this._jobs = {};
    this.counts = this.status.map(function () {
      return 0;
    });
  }

  next(id) {
    var current, next;
    current = this._jobs[id];
    next = current + 1;

    if (current != null && next < this.status.length) {
      this.counts[current]--;
      this.counts[next]++;
      return this._jobs[id]++;
    } else if (current != null) {
      this.counts[current]--;
      return delete this._jobs[id];
    }
  }

  start(id) {
    var initial;
    initial = 0;
    this._jobs[id] = initial;
    return this.counts[initial]++;
  }

  remove(id) {
    var current;
    current = this._jobs[id];

    if (current != null) {
      this.counts[current]--;
      delete this._jobs[id];
    }

    return current != null;
  }

  jobStatus(id) {
    var ref;
    return (ref = this.status[this._jobs[id]]) != null ? ref : null;
  }

  statusJobs(status) {
    var k, pos, ref, results, v;

    if (status != null) {
      pos = this.status.indexOf(status);

      if (pos < 0) {
        throw new BottleneckError(`status must be one of ${this.status.join(', ')}`);
      }

      ref = this._jobs;
      results = [];

      for (k in ref) {
        v = ref[k];

        if (v === pos) {
          results.push(k);
        }
      }

      return results;
    } else {
      return Object.keys(this._jobs);
    }
  }

  statusCounts() {
    return this.counts.reduce((acc, v, i) => {
      acc[this.status[i]] = v;
      return acc;
    }, {});
  }

};
module.exports = States;
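`States` backs Bottleneck's documented lifecycle-introspection methods. A sketch of querying it through a limiter (`someTask` and the job id are hypothetical):

```js
const Bottleneck = require("bottleneck");

// trackDoneStatus adds the optional DONE bucket maintained by States.
const limiter = new Bottleneck({ trackDoneStatus: true });

limiter.schedule({ id: "job-1" }, () => someTask()); // someTask is hypothetical

console.log(limiter.jobStatus("job-1")); // e.g. "RECEIVED" or "QUEUED"
console.log(limiter.counts());           // { RECEIVED, QUEUED, RUNNING, EXECUTING, DONE }
```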
80
node_modules/bottleneck/lib/Sync.js
generated
vendored
Normal file
@ -0,0 +1,80 @@
"use strict";

function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }

function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }

var DLList, Sync;
DLList = require("./DLList");
Sync = class Sync {
  constructor(name, Promise) {
    this.schedule = this.schedule.bind(this);
    this.name = name;
    this.Promise = Promise;
    this._running = 0;
    this._queue = new DLList();
  }

  isEmpty() {
    return this._queue.length === 0;
  }

  _tryToRun() {
    var _this = this;

    return _asyncToGenerator(function* () {
      var args, cb, error, reject, resolve, returned, task;

      if (_this._running < 1 && _this._queue.length > 0) {
        _this._running++;

        var _this$_queue$shift = _this._queue.shift();

        task = _this$_queue$shift.task;
        args = _this$_queue$shift.args;
        resolve = _this$_queue$shift.resolve;
        reject = _this$_queue$shift.reject;
        cb = yield _asyncToGenerator(function* () {
          try {
            returned = yield task(...args);
            return function () {
              return resolve(returned);
            };
          } catch (error1) {
            error = error1;
            return function () {
              return reject(error);
            };
          }
        })();
        _this._running--;

        _this._tryToRun();

        return cb();
      }
    })();
  }

  schedule(task, ...args) {
    var promise, reject, resolve;
    resolve = reject = null;
    promise = new this.Promise(function (_resolve, _reject) {
      resolve = _resolve;
      return reject = _reject;
    });

    this._queue.push({
      task,
      args,
      resolve,
      reject
    });

    this._tryToRun();

    return promise;
  }

};
module.exports = Sync;
5
node_modules/bottleneck/lib/es5.js
generated
vendored
Normal file
@ -0,0 +1,5 @@
"use strict";

require("regenerator-runtime/runtime");

module.exports = require("./Bottleneck");
3
node_modules/bottleneck/lib/index.js
generated
vendored
Normal file
@ -0,0 +1,3 @@
"use strict";

module.exports = require("./Bottleneck");
24
node_modules/bottleneck/lib/lua.json
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
26
node_modules/bottleneck/lib/parser.js
generated
vendored
Normal file
@ -0,0 +1,26 @@
"use strict";

exports.load = function (received, defaults, onto = {}) {
  var k, ref, v;

  for (k in defaults) {
    v = defaults[k];
    onto[k] = (ref = received[k]) != null ? ref : v;
  }

  return onto;
};

exports.overwrite = function (received, defaults, onto = {}) {
  var k, v;

  for (k in received) {
    v = received[k];

    if (defaults[k] !== void 0) {
      onto[k] = v;
    }
  }

  return onto;
};
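A quick sketch of the two helpers' behavior, taken directly from the code above (the deep require path is for illustration only; this is an internal module):

```js
const parser = require("bottleneck/lib/parser"); // internal, not public API

// load(): defaults fill in whatever `received` leaves undefined.
parser.load({ minTime: 100 }, { minTime: 0, maxConcurrent: null });
// => { minTime: 100, maxConcurrent: null }

// overwrite(): only keys that also exist in `defaults` are copied.
parser.overwrite({ minTime: 100, bogus: true }, { minTime: 0 });
// => { minTime: 100 }
```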
1
node_modules/bottleneck/lib/version.json
generated
vendored
Normal file
@ -0,0 +1 @@
{"version":"2.19.5"}
1524
node_modules/bottleneck/light.js
generated
vendored
Normal file
File diff suppressed because it is too large
85
node_modules/bottleneck/package.json
generated
vendored
Normal file
@ -0,0 +1,85 @@
{
  "_args": [
    [
      "bottleneck@2.19.5",
      "/Users/dougtangren/code/rust/action-gh-release"
    ]
  ],
  "_from": "bottleneck@2.19.5",
  "_id": "bottleneck@2.19.5",
  "_inBundle": false,
  "_integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==",
  "_location": "/bottleneck",
  "_phantomChildren": {},
  "_requested": {
    "type": "version",
    "registry": true,
    "raw": "bottleneck@2.19.5",
    "name": "bottleneck",
    "escapedName": "bottleneck",
    "rawSpec": "2.19.5",
    "saveSpec": null,
    "fetchSpec": "2.19.5"
  },
  "_requiredBy": [
    "/@octokit/plugin-throttling"
  ],
  "_resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz",
  "_spec": "2.19.5",
  "_where": "/Users/dougtangren/code/rust/action-gh-release",
  "author": {
    "name": "Simon Grondin"
  },
  "bugs": {
    "url": "https://github.com/SGrondin/bottleneck/issues"
  },
  "dependencies": {},
  "description": "Distributed task scheduler and rate limiter",
  "devDependencies": {
    "@babel/core": "^7.5.0",
    "@babel/preset-env": "^7.5.0",
    "@types/es6-promise": "0.0.33",
    "assert": "^1.5.0",
    "coffeescript": "2.4.x",
    "ejs-cli": "github:SGrondin/ejs-cli#master",
    "ioredis": "^4.11.1",
    "leakage": "^0.4.0",
    "mocha": "^6.1.4",
    "redis": "^2.8.0",
    "regenerator-runtime": "^0.12.1",
    "rollup": "^0.66.6",
    "rollup-plugin-babel": "^4.3.3",
    "rollup-plugin-commonjs": "^9.3.4",
    "rollup-plugin-json": "^3.1.0",
    "rollup-plugin-node-resolve": "^3.4.0",
    "typescript": "^2.6.2"
  },
  "homepage": "https://github.com/SGrondin/bottleneck#readme",
  "keywords": [
    "async rate limiter",
    "rate limiter",
    "rate limiting",
    "async",
    "rate",
    "limiting",
    "limiter",
    "throttle",
    "throttling",
    "throttler",
    "load",
    "clustering"
  ],
  "license": "MIT",
  "main": "lib/index.js",
  "name": "bottleneck",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/SGrondin/bottleneck.git"
  },
  "scripts": {
    "test": "mocha test",
    "test-all": "./scripts/test_all.sh"
  },
  "typings": "bottleneck.d.ts",
  "version": "2.19.5"
}
34
node_modules/bottleneck/rollup.config.es5.js
generated
vendored
Normal file
@ -0,0 +1,34 @@
import json from 'rollup-plugin-json';
import resolve from 'rollup-plugin-node-resolve';
import commonjs from 'rollup-plugin-commonjs';
import babel from 'rollup-plugin-babel';

const bannerLines = [
  'This file contains the full Bottleneck library (MIT) compiled to ES5.',
  'https://github.com/SGrondin/bottleneck',
  'It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.',
  'See the following link for Copyright and License information:',
  'https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js',
].map(x => ` * ${x}`).join('\n');
const banner = `/**\n${bannerLines}\n */`;

export default {
  input: 'lib/es5.js',
  output: {
    name: 'Bottleneck',
    file: 'es5.js',
    sourcemap: false,
    globals: {},
    format: 'umd',
    banner
  },
  external: [],
  plugins: [
    json(),
    resolve(),
    commonjs(),
    babel({
      exclude: 'node_modules/**'
    })
  ]
};
44
node_modules/bottleneck/rollup.config.light.js
generated
vendored
Normal file
@ -0,0 +1,44 @@
import commonjs from 'rollup-plugin-commonjs';
import json from 'rollup-plugin-json';
import resolve from 'rollup-plugin-node-resolve';

const bannerLines = [
  'This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.',
  'https://github.com/SGrondin/bottleneck',
].map(x => ` * ${x}`).join('\n');
const banner = `/**\n${bannerLines}\n */`;

const missing = `export default () => console.log('You must import the full version of Bottleneck in order to use this feature.');`;
const exclude = [
  'RedisDatastore.js',
  'RedisConnection.js',
  'IORedisConnection.js',
  'Scripts.js'
];

export default {
  input: 'lib/index.js',
  output: {
    name: 'Bottleneck',
    file: 'light.js',
    sourcemap: false,
    globals: {},
    format: 'umd',
    banner
  },
  external: [],
  plugins: [
    json(),
    {
      load: id => {
        const chunks = id.split('/');
        const file = chunks[chunks.length - 1];
        if (exclude.indexOf(file) >= 0) {
          return missing
        }
      }
    },
    resolve(),
    commonjs()
  ]
};
25
node_modules/bottleneck/scripts/assemble_lua.js
generated
vendored
Normal file
@ -0,0 +1,25 @@
var fs = require('fs')

var input = __dirname + '/../src/redis'
var loaded = {}

var promises = fs.readdirSync(input).map(function (file) {
  return new Promise(function (resolve, reject) {
    fs.readFile(input + '/' + file, function (err, data) {
      if (err != null) {
        return reject(err)
      }
      loaded[file] = data.toString('utf8')
      return resolve()
    })
  })
})

Promise.all(promises)
.then(function () {
  console.log(JSON.stringify(loaded, Object.keys(loaded).sort(), 2))
})
.catch(function (err) {
  console.error(err)
  process.exit(1)
})
82
node_modules/bottleneck/scripts/build.sh
generated
vendored
Executable file
@ -0,0 +1,82 @@
#!/usr/bin/env bash

set -e

if [ ! -d node_modules ]; then
  echo "[B] Run 'npm install' first"
  exit 1
fi


clean() {
  rm -f .babelrc
  rm -rf lib/*
  node scripts/version.js > lib/version.json
  node scripts/assemble_lua.js > lib/lua.json
}

makeLib10() {
  echo '[B] Compiling Bottleneck to Node 10+...'
  npx coffee --compile --bare --no-header src/*.coffee
  mv src/*.js lib/
}

makeLib6() {
  echo '[B] Compiling Bottleneck to Node 6+...'
  ln -s .babelrc.lib .babelrc
  npx coffee --compile --bare --no-header --transpile src/*.coffee
  mv src/*.js lib/
}

makeES5() {
  echo '[B] Compiling Bottleneck to ES5...'
  ln -s .babelrc.es5 .babelrc
  npx coffee --compile --bare --no-header src/*.coffee
  mv src/*.js lib/

  echo '[B] Assembling ES5 bundle...'
  npx rollup -c rollup.config.es5.js
}

makeLight() {
  makeLib10

  echo '[B] Assembling light bundle...'
  npx rollup -c rollup.config.light.js
}

makeTypings() {
  echo '[B] Compiling and testing TS typings...'
  npx ejs-cli bottleneck.d.ts.ejs > bottleneck.d.ts
  npx tsc --noEmit --strict test.ts
}

if [ "$1" = 'dev' ]; then
  clean
  makeLib10
elif [ "$1" = 'bench' ]; then
  clean
  makeLib6
elif [ "$1" = 'es5' ]; then
  clean
  makeES5
elif [ "$1" = 'light' ]; then
  clean
  makeLight
elif [ "$1" = 'typings' ]; then
  makeTypings
else
  clean
  makeES5

  clean
  makeLight

  clean
  makeLib6
  makeTypings
fi

rm -f .babelrc

echo '[B] Done!'
20
node_modules/bottleneck/scripts/test_all.sh
generated
vendored
Executable file
@ -0,0 +1,20 @@
#!/usr/bin/env bash

set -e

source .env

echo 'ioredis tests'
DATASTORE=ioredis npm test

echo 'NodeRedis tests'
DATASTORE=redis npm test

echo 'ES5 bundle tests'
BUILD=es5 npm test

echo 'Light bundle tests'
BUILD=light npm test

echo 'Local tests'
npm test
3
node_modules/bottleneck/scripts/version.js
generated
vendored
Normal file
@ -0,0 +1,3 @@
const packagejson = require('../package.json')

console.log(JSON.stringify({version: packagejson.version}))
39
node_modules/bottleneck/src/Batcher.coffee
generated
vendored
Normal file
@ -0,0 +1,39 @@
parser = require "./parser"
Events = require "./Events"

class Batcher
  defaults:
    maxTime: null
    maxSize: null
    Promise: Promise

  constructor: (@options={}) ->
    parser.load @options, @defaults, @
    @Events = new Events @
    @_arr = []
    @_resetPromise()
    @_lastFlush = Date.now()

  _resetPromise: ->
    @_promise = new @Promise (res, rej) => @_resolve = res

  _flush: ->
    clearTimeout @_timeout
    @_lastFlush = Date.now()
    @_resolve()
    @Events.trigger "batch", @_arr
    @_arr = []
    @_resetPromise()

  add: (data) ->
    @_arr.push data
    ret = @_promise
    if @_arr.length == @maxSize
      @_flush()
    else if @maxTime? and @_arr.length == 1
      @_timeout = setTimeout =>
        @_flush()
      , @maxTime
    ret

module.exports = Batcher
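`Batcher` collects items and flushes them as a single "batch" event once either threshold is hit, per Bottleneck's documented Batching API. A usage sketch with illustrative thresholds:

```js
const Bottleneck = require("bottleneck");

// Flush whenever 10 items are queued or 5 s pass since the first one.
const batcher = new Bottleneck.Batcher({ maxSize: 10, maxTime: 5000 });

batcher.on("batch", (batch) => console.log("flushing", batch));

// add() resolves once the item's batch has been flushed.
batcher.add({ event: "click" });
```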
298
node_modules/bottleneck/src/Bottleneck.coffee
generated
vendored
Normal file
@ -0,0 +1,298 @@
NUM_PRIORITIES = 10
DEFAULT_PRIORITY = 5

parser = require "./parser"
Queues = require "./Queues"
Job = require "./Job"
LocalDatastore = require "./LocalDatastore"
RedisDatastore = require "./RedisDatastore"
Events = require "./Events"
States = require "./States"
Sync = require "./Sync"

class Bottleneck
  Bottleneck.default = Bottleneck
  Bottleneck.Events = Events
  Bottleneck.version = Bottleneck::version = require("./version.json").version
  Bottleneck.strategy = Bottleneck::strategy = { LEAK:1, OVERFLOW:2, OVERFLOW_PRIORITY:4, BLOCK:3 }
  Bottleneck.BottleneckError = Bottleneck::BottleneckError = require "./BottleneckError"
  Bottleneck.Group = Bottleneck::Group = require "./Group"
  Bottleneck.RedisConnection = Bottleneck::RedisConnection = require "./RedisConnection"
  Bottleneck.IORedisConnection = Bottleneck::IORedisConnection = require "./IORedisConnection"
  Bottleneck.Batcher = Bottleneck::Batcher = require "./Batcher"
  jobDefaults:
    priority: DEFAULT_PRIORITY
    weight: 1
    expiration: null
    id: "<no-id>"
  storeDefaults:
    maxConcurrent: null
    minTime: 0
    highWater: null
    strategy: Bottleneck::strategy.LEAK
    penalty: null
    reservoir: null
    reservoirRefreshInterval: null
    reservoirRefreshAmount: null
    reservoirIncreaseInterval: null
    reservoirIncreaseAmount: null
    reservoirIncreaseMaximum: null
  localStoreDefaults:
    Promise: Promise
    timeout: null
    heartbeatInterval: 250
  redisStoreDefaults:
    Promise: Promise
    timeout: null
    heartbeatInterval: 5000
    clientTimeout: 10000
    Redis: null
    clientOptions: {}
    clusterNodes: null
    clearDatastore: false
    connection: null
  instanceDefaults:
    datastore: "local"
    connection: null
    id: "<no-id>"
    rejectOnDrop: true
    trackDoneStatus: false
    Promise: Promise
  stopDefaults:
    enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs."
    dropWaitingJobs: true
    dropErrorMessage: "This limiter has been stopped."

  constructor: (options={}, invalid...) ->
    @_validateOptions options, invalid
    parser.load options, @instanceDefaults, @
    @_queues = new Queues NUM_PRIORITIES
    @_scheduled = {}
    @_states = new States ["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(if @trackDoneStatus then ["DONE"] else [])
    @_limiter = null
    @Events = new Events @
    @_submitLock = new Sync "submit", @Promise
    @_registerLock = new Sync "register", @Promise
    storeOptions = parser.load options, @storeDefaults, {}

    @_store = if @datastore == "redis" or @datastore == "ioredis" or @connection?
      storeInstanceOptions = parser.load options, @redisStoreDefaults, {}
      new RedisDatastore @, storeOptions, storeInstanceOptions
    else if @datastore == "local"
      storeInstanceOptions = parser.load options, @localStoreDefaults, {}
      new LocalDatastore @, storeOptions, storeInstanceOptions
    else
      throw new Bottleneck::BottleneckError "Invalid datastore type: #{@datastore}"

    @_queues.on "leftzero", => @_store.heartbeat?.ref?()
    @_queues.on "zero", => @_store.heartbeat?.unref?()

  _validateOptions: (options, invalid) ->
    unless options? and typeof options == "object" and invalid.length == 0
      throw new Bottleneck::BottleneckError "Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1."

  ready: -> @_store.ready

  clients: -> @_store.clients

  channel: -> "b_#{@id}"

  channel_client: -> "b_#{@id}_#{@_store.clientId}"

  publish: (message) -> @_store.__publish__ message

  disconnect: (flush=true) -> @_store.__disconnect__ flush

  chain: (@_limiter) -> @

  queued: (priority) -> @_queues.queued priority

  clusterQueued: -> @_store.__queued__()

  empty: -> @queued() == 0 and @_submitLock.isEmpty()

  running: -> @_store.__running__()

  done: -> @_store.__done__()

  jobStatus: (id) -> @_states.jobStatus id

  jobs: (status) -> @_states.statusJobs status

  counts: -> @_states.statusCounts()

  _randomIndex: -> Math.random().toString(36).slice(2)

  check: (weight=1) -> @_store.__check__ weight

  _clearGlobalState: (index) ->
    if @_scheduled[index]?
      clearTimeout @_scheduled[index].expiration
      delete @_scheduled[index]
      true
    else false

  _free: (index, job, options, eventInfo) ->
    try
      { running } = await @_store.__free__ index, options.weight
      @Events.trigger "debug", "Freed #{options.id}", eventInfo
      if running == 0 and @empty() then @Events.trigger "idle"
    catch e
      @Events.trigger "error", e

  _run: (index, job, wait) ->
    job.doRun()
    clearGlobalState = @_clearGlobalState.bind @, index
    run = @_run.bind @, index, job
    free = @_free.bind @, index, job

    @_scheduled[index] =
      timeout: setTimeout =>
        job.doExecute @_limiter, clearGlobalState, run, free
      , wait
      expiration: if job.options.expiration? then setTimeout ->
        job.doExpire clearGlobalState, run, free
      , wait + job.options.expiration
      job: job

  _drainOne: (capacity) ->
    @_registerLock.schedule =>
      if @queued() == 0 then return @Promise.resolve null
      queue = @_queues.getFirst()
      { options, args } = next = queue.first()
      if capacity? and options.weight > capacity then return @Promise.resolve null
      @Events.trigger "debug", "Draining #{options.id}", { args, options }
      index = @_randomIndex()
      @_store.__register__ index, options.weight, options.expiration
      .then ({ success, wait, reservoir }) =>
        @Events.trigger "debug", "Drained #{options.id}", { success, args, options }
        if success
          queue.shift()
          empty = @empty()
          if empty then @Events.trigger "empty"
          if reservoir == 0 then @Events.trigger "depleted", empty
          @_run index, next, wait
          @Promise.resolve options.weight
        else
          @Promise.resolve null

  _drainAll: (capacity, total=0) ->
    @_drainOne(capacity)
    .then (drained) =>
      if drained?
        newCapacity = if capacity? then capacity - drained else capacity
        @_drainAll(newCapacity, total + drained)
      else @Promise.resolve total
    .catch (e) => @Events.trigger "error", e

  _dropAllQueued: (message) -> @_queues.shiftAll (job) -> job.doDrop { message }

  stop: (options={}) ->
    options = parser.load options, @stopDefaults
    waitForExecuting = (at) =>
      finished = =>
        counts = @_states.counts
        (counts[0] + counts[1] + counts[2] + counts[3]) == at
      new @Promise (resolve, reject) =>
        if finished() then resolve()
        else
          @on "done", =>
            if finished()
              @removeAllListeners "done"
              resolve()
    done = if options.dropWaitingJobs
      @_run = (index, next) -> next.doDrop { message: options.dropErrorMessage }
      @_drainOne = => @Promise.resolve null
      @_registerLock.schedule => @_submitLock.schedule =>
        for k, v of @_scheduled
          if @jobStatus(v.job.options.id) == "RUNNING"
            clearTimeout v.timeout
            clearTimeout v.expiration
            v.job.doDrop { message: options.dropErrorMessage }
        @_dropAllQueued options.dropErrorMessage
        waitForExecuting(0)
    else
      @schedule { priority: NUM_PRIORITIES - 1, weight: 0 }, => waitForExecuting(1)
    @_receive = (job) -> job._reject new Bottleneck::BottleneckError options.enqueueErrorMessage
    @stop = => @Promise.reject new Bottleneck::BottleneckError "stop() has already been called"
    done

  _addToQueue: (job) =>
    { args, options } = job
    try
      { reachedHWM, blocked, strategy } = await @_store.__submit__ @queued(), options.weight
    catch error
      @Events.trigger "debug", "Could not queue #{options.id}", { args, options, error }
      job.doDrop { error }
      return false

    if blocked
      job.doDrop()
      return true
    else if reachedHWM
      shifted = if strategy == Bottleneck::strategy.LEAK then @_queues.shiftLastFrom(options.priority)
      else if strategy == Bottleneck::strategy.OVERFLOW_PRIORITY then @_queues.shiftLastFrom(options.priority + 1)
      else if strategy == Bottleneck::strategy.OVERFLOW then job
      if shifted? then shifted.doDrop()
      if not shifted? or strategy == Bottleneck::strategy.OVERFLOW
        if not shifted? then job.doDrop()
        return reachedHWM

    job.doQueue reachedHWM, blocked
    @_queues.push job
    await @_drainAll()
    reachedHWM
|
||||
_receive: (job) ->
|
||||
if @_states.jobStatus(job.options.id)?
|
||||
job._reject new Bottleneck::BottleneckError "A job with the same id already exists (id=#{job.options.id})"
|
||||
false
|
||||
else
|
||||
job.doReceive()
|
||||
@_submitLock.schedule @_addToQueue, job
|
||||
|
||||
submit: (args...) ->
|
||||
if typeof args[0] == "function"
|
||||
[fn, args..., cb] = args
|
||||
options = parser.load {}, @jobDefaults
|
||||
else
|
||||
[options, fn, args..., cb] = args
|
||||
options = parser.load options, @jobDefaults
|
||||
|
||||
task = (args...) =>
|
||||
new @Promise (resolve, reject) ->
|
||||
fn args..., (args...) ->
|
||||
(if args[0]? then reject else resolve) args
|
||||
|
||||
job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise
|
||||
job.promise
|
||||
.then (args) -> cb? args...
|
||||
.catch (args) -> if Array.isArray args then cb? args... else cb? args
|
||||
@_receive job
|
||||
|
||||
schedule: (args...) ->
|
||||
if typeof args[0] == "function"
|
||||
[task, args...] = args
|
||||
options = {}
|
||||
else
|
||||
[options, task, args...] = args
|
||||
job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise
|
||||
@_receive job
|
||||
job.promise
|
||||
|
||||
wrap: (fn) ->
|
||||
schedule = @schedule.bind @
|
||||
wrapped = (args...) -> schedule fn.bind(@), args...
|
||||
wrapped.withOptions = (options, args...) -> schedule options, fn, args...
|
||||
wrapped
|
||||
|
||||
updateSettings: (options={}) ->
|
||||
await @_store.__updateSettings__ parser.overwrite options, @storeDefaults
|
||||
parser.overwrite options, @instanceDefaults, @
|
||||
@
|
||||
|
||||
currentReservoir: -> @_store.__currentReservoir__()
|
||||
|
||||
incrementReservoir: (incr=0) -> @_store.__incrementReservoir__ incr
|
||||
|
||||
module.exports = Bottleneck
|
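The class above exposes three entry points: callback-style `submit`, promise-returning `schedule`, and `wrap`, which returns a rate-limited copy of a function. A minimal usage sketch, assuming the file is required directly as `./Bottleneck`; the `fetchPage` task and its URLs are illustrative placeholders, not part of this diff:

```coffeescript
Bottleneck = require "./Bottleneck"

# At most 2 concurrent jobs, at least 100ms between job starts
limiter = new Bottleneck maxConcurrent: 2, minTime: 100

# Hypothetical async task, for illustration only
fetchPage = (url) -> Promise.resolve "contents of #{url}"

# schedule() returns the job's promise
limiter.schedule(fetchPage, "https://example.com")
  .then (body) -> console.log body

# wrap() returns a version of the function that always goes through the limiter
limitedFetch = limiter.wrap fetchPage
limitedFetch("https://example.com/2").then (body) -> console.log body
```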
3 node_modules/bottleneck/src/BottleneckError.coffee generated vendored Normal file
@ -0,0 +1,3 @@
class BottleneckError extends Error

module.exports = BottleneckError
38 node_modules/bottleneck/src/DLList.coffee generated vendored Normal file
@ -0,0 +1,38 @@
class DLList
  constructor: (@incr, @decr) ->
    @_first = null
    @_last = null
    @length = 0
  push: (value) ->
    @length++
    @incr?()
    node = { value, prev: @_last, next: null }
    if @_last?
      @_last.next = node
      @_last = node
    else @_first = @_last = node
    undefined
  shift: () ->
    if not @_first? then return
    else
      @length--
      @decr?()
      value = @_first.value
      if (@_first = @_first.next)?
        @_first.prev = null
      else
        @_last = null
      value
  first: () -> if @_first? then @_first.value
  getArray: () ->
    node = @_first
    while node? then (ref = node; node = node.next; ref.value)
  forEachShift: (cb) ->
    node = @shift()
    while node? then (cb node; node = @shift())
    undefined
  debug: () ->
    node = @_first
    while node? then (ref = node; node = node.next; { value: ref.value, prev: ref.prev?.value, next: ref.next?.value })

module.exports = DLList
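The `incr`/`decr` hooks fire on every `push` and every successful `shift`, which is how `Queues` (below) keeps a running total across its per-priority lists. A standalone sketch of the behavior, assuming the file's own require path:

```coffeescript
DLList = require "./DLList"

# Both callbacks are optional; here they just log length changes
list = new DLList (-> console.log "+1"), (-> console.log "-1")
list.push "a"
list.push "b"
console.log list.shift()    # "a" (FIFO), logs "-1"
console.log list.getArray() # ["b"]
```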
38 node_modules/bottleneck/src/Events.coffee generated vendored Normal file
@ -0,0 +1,38 @@
class Events
  constructor: (@instance) ->
    @_events = {}
    if @instance.on? or @instance.once? or @instance.removeAllListeners?
      throw new Error "An Emitter already exists for this object"
    @instance.on = (name, cb) => @_addListener name, "many", cb
    @instance.once = (name, cb) => @_addListener name, "once", cb
    @instance.removeAllListeners = (name=null) =>
      if name? then delete @_events[name] else @_events = {}
  _addListener: (name, status, cb) ->
    @_events[name] ?= []
    @_events[name].push {cb, status}
    @instance
  listenerCount: (name) ->
    if @_events[name]? then @_events[name].length else 0
  trigger: (name, args...) ->
    try
      if name != "debug" then @trigger "debug", "Event triggered: #{name}", args
      return unless @_events[name]?
      @_events[name] = @_events[name].filter (listener) -> listener.status != "none"
      promises = @_events[name].map (listener) =>
        return if listener.status == "none"
        if listener.status == "once" then listener.status = "none"
        try
          returned = listener.cb?(args...)
          if typeof returned?.then == "function"
            await returned
          else
            returned
        catch e
          # Compare the event name itself, not the string literal "name",
          # so "error" listeners cannot re-trigger themselves recursively
          if name != "error" then @trigger "error", e
          null
      (await Promise.all promises).find (x) -> x?
    catch e
      if name != "error" then @trigger "error", e
      null

module.exports = Events
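`trigger` awaits promise-returning listeners and resolves to the first non-null result, which is what lets a `failed` listener return a retry delay elsewhere in the library. A minimal sketch of the injected emitter, assuming the file's own require path:

```coffeescript
Events = require "./Events"

obj = {}
events = new Events obj  # installs on/once/removeAllListeners onto obj

obj.on "greet", (name) -> "hello #{name}"

# trigger resolves to the first non-null value returned by a listener
events.trigger("greet", "world").then (result) ->
  console.log result     # "hello world"
```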
80 node_modules/bottleneck/src/Group.coffee generated vendored Normal file
@ -0,0 +1,80 @@
parser = require "./parser"
Events = require "./Events"
RedisConnection = require "./RedisConnection"
IORedisConnection = require "./IORedisConnection"
Scripts = require "./Scripts"

class Group
  defaults:
    timeout: 1000 * 60 * 5
    connection: null
    Promise: Promise
    id: "group-key"

  constructor: (@limiterOptions={}) ->
    parser.load @limiterOptions, @defaults, @
    @Events = new Events @
    @instances = {}
    @Bottleneck = require "./Bottleneck"
    @_startAutoCleanup()
    @sharedConnection = @connection?

    if !@connection?
      if @limiterOptions.datastore == "redis"
        @connection = new RedisConnection Object.assign {}, @limiterOptions, { @Events }
      else if @limiterOptions.datastore == "ioredis"
        @connection = new IORedisConnection Object.assign {}, @limiterOptions, { @Events }

  key: (key="") -> @instances[key] ? do =>
    limiter = @instances[key] = new @Bottleneck Object.assign @limiterOptions, {
      id: "#{@id}-#{key}",
      @timeout,
      @connection
    }
    @Events.trigger "created", limiter, key
    limiter

  deleteKey: (key="") =>
    instance = @instances[key]
    if @connection
      deleted = await @connection.__runCommand__ ['del', Scripts.allKeys("#{@id}-#{key}")...]
    if instance?
      delete @instances[key]
      await instance.disconnect()
    instance? or deleted > 0

  limiters: -> { key: k, limiter: v } for k, v of @instances

  keys: -> Object.keys @instances

  clusterKeys: ->
    if !@connection? then return @Promise.resolve @keys()
    keys = []
    cursor = null
    start = "b_#{@id}-".length
    end = "_settings".length
    until cursor == 0
      [next, found] = await @connection.__runCommand__ ["scan", (cursor ? 0), "match", "b_#{@id}-*_settings", "count", 10000]
      cursor = ~~next
      keys.push(k.slice(start, -end)) for k in found
    keys

  _startAutoCleanup: ->
    clearInterval @interval
    (@interval = setInterval =>
      time = Date.now()
      for k, v of @instances
        try if await v._store.__groupCheck__(time) then @deleteKey k
        catch e then v.Events.trigger "error", e
    , (@timeout / 2)).unref?()

  updateSettings: (options={}) ->
    parser.overwrite options, @defaults, @
    parser.overwrite options, options, @limiterOptions
    @_startAutoCleanup() if options.timeout?

  disconnect: (flush=true) ->
    if !@sharedConnection
      @connection?.disconnect flush

module.exports = Group
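A Group hands out one independent limiter per key, created lazily on first access and dropped again by the auto-cleanup timer once idle. A minimal sketch; the key names are illustrative:

```coffeescript
Group = require "./Group"

group = new Group maxConcurrent: 1, minTime: 250

group.on "created", (limiter, key) -> console.log "new limiter for #{key}"

# Each key gets its own limiter on first use
group.key("user-123").schedule -> Promise.resolve "job for user-123"
group.key("user-456").schedule -> Promise.resolve "job for user-456"
```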
84 node_modules/bottleneck/src/IORedisConnection.coffee generated vendored Normal file
@ -0,0 +1,84 @@
parser = require "./parser"
Events = require "./Events"
Scripts = require "./Scripts"

class IORedisConnection
  datastore: "ioredis"
  defaults:
    Redis: null
    clientOptions: {}
    clusterNodes: null
    client: null
    Promise: Promise
    Events: null

  constructor: (options={}) ->
    parser.load options, @defaults, @
    @Redis ?= eval("require")("ioredis") # Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
    @Events ?= new Events @
    @terminated = false

    if @clusterNodes?
      @client = new @Redis.Cluster @clusterNodes, @clientOptions
      @subscriber = new @Redis.Cluster @clusterNodes, @clientOptions
    else if @client? and !@client.duplicate?
      @subscriber = new @Redis.Cluster @client.startupNodes, @client.options
    else
      @client ?= new @Redis @clientOptions
      @subscriber = @client.duplicate()
    @limiters = {}

    @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)]
    .then =>
      @_loadScripts()
      { @client, @subscriber }

  _setup: (client, sub) ->
    client.setMaxListeners 0
    new @Promise (resolve, reject) =>
      client.on "error", (e) => @Events.trigger "error", e
      if sub
        client.on "message", (channel, message) =>
          @limiters[channel]?._store.onMessage channel, message
      if client.status == "ready" then resolve()
      else client.once "ready", resolve

  _loadScripts: -> Scripts.names.forEach (name) => @client.defineCommand name, { lua: Scripts.payload(name) }

  __runCommand__: (cmd) ->
    await @ready
    [[_, deleted]] = await @client.pipeline([cmd]).exec()
    deleted

  __addLimiter__: (instance) ->
    @Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
      new @Promise (resolve, reject) =>
        @subscriber.subscribe channel, =>
          @limiters[channel] = instance
          resolve()

  __removeLimiter__: (instance) ->
    [instance.channel(), instance.channel_client()].forEach (channel) =>
      await @subscriber.unsubscribe channel unless @terminated
      delete @limiters[channel]

  __scriptArgs__: (name, id, args, cb) ->
    keys = Scripts.keys name, id
    [keys.length].concat keys, args, cb

  __scriptFn__: (name) ->
    @client[name].bind(@client)

  disconnect: (flush=true) ->
    clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters
    @limiters = {}
    @terminated = true

    if flush
      @Promise.all [@client.quit(), @subscriber.quit()]
    else
      @client.disconnect()
      @subscriber.disconnect()
      @Promise.resolve()

module.exports = IORedisConnection
98 node_modules/bottleneck/src/Job.coffee generated vendored Normal file
@ -0,0 +1,98 @@
NUM_PRIORITIES = 10
DEFAULT_PRIORITY = 5

parser = require "./parser"
BottleneckError = require "./BottleneckError"

class Job
  constructor: (@task, @args, options, jobDefaults, @rejectOnDrop, @Events, @_states, @Promise) ->
    @options = parser.load options, jobDefaults
    @options.priority = @_sanitizePriority @options.priority
    if @options.id == jobDefaults.id then @options.id = "#{@options.id}-#{@_randomIndex()}"
    @promise = new @Promise (@_resolve, @_reject) =>
    @retryCount = 0

  _sanitizePriority: (priority) ->
    sProperty = if ~~priority != priority then DEFAULT_PRIORITY else priority
    if sProperty < 0 then 0 else if sProperty > NUM_PRIORITIES-1 then NUM_PRIORITIES-1 else sProperty

  _randomIndex: -> Math.random().toString(36).slice(2)

  doDrop: ({ error, message="This job has been dropped by Bottleneck" } = {}) ->
    if @_states.remove @options.id
      if @rejectOnDrop then @_reject (error ? new BottleneckError message)
      @Events.trigger "dropped", { @args, @options, @task, @promise }
      true
    else
      false

  _assertStatus: (expected) ->
    status = @_states.jobStatus @options.id
    if not (status == expected or (expected == "DONE" and status == null))
      throw new BottleneckError "Invalid job status #{status}, expected #{expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues"

  doReceive: () ->
    @_states.start @options.id
    @Events.trigger "received", { @args, @options }

  doQueue: (reachedHWM, blocked) ->
    @_assertStatus "RECEIVED"
    @_states.next @options.id
    @Events.trigger "queued", { @args, @options, reachedHWM, blocked }

  doRun: () ->
    if @retryCount == 0
      @_assertStatus "QUEUED"
      @_states.next @options.id
    else @_assertStatus "EXECUTING"
    @Events.trigger "scheduled", { @args, @options }

  doExecute: (chained, clearGlobalState, run, free) ->
    if @retryCount == 0
      @_assertStatus "RUNNING"
      @_states.next @options.id
    else @_assertStatus "EXECUTING"
    eventInfo = { @args, @options, @retryCount }
    @Events.trigger "executing", eventInfo

    try
      passed = await if chained?
        chained.schedule @options, @task, @args...
      else @task @args...

      if clearGlobalState()
        @doDone eventInfo
        await free @options, eventInfo
        @_assertStatus "DONE"
        @_resolve passed
    catch error
      @_onFailure error, eventInfo, clearGlobalState, run, free

  doExpire: (clearGlobalState, run, free) ->
    # Parenthesized so the comparison applies to the returned status, not the argument
    if @_states.jobStatus(@options.id) == "RUNNING"
      @_states.next @options.id
    @_assertStatus "EXECUTING"
    eventInfo = { @args, @options, @retryCount }
    error = new BottleneckError "This job timed out after #{@options.expiration} ms."
    @_onFailure error, eventInfo, clearGlobalState, run, free

  _onFailure: (error, eventInfo, clearGlobalState, run, free) ->
    if clearGlobalState()
      retry = await @Events.trigger "failed", error, eventInfo
      if retry?
        retryAfter = ~~retry
        @Events.trigger "retry", "Retrying #{@options.id} after #{retryAfter} ms", eventInfo
        @retryCount++
        run retryAfter
      else
        @doDone eventInfo
        await free @options, eventInfo
        @_assertStatus "DONE"
        @_reject error

  doDone: (eventInfo) ->
    @_assertStatus "EXECUTING"
    @_states.next @options.id
    @Events.trigger "done", eventInfo

module.exports = Job
140 node_modules/bottleneck/src/LocalDatastore.coffee generated vendored Normal file
@ -0,0 +1,140 @@
parser = require "./parser"
BottleneckError = require "./BottleneckError"

class LocalDatastore
  constructor: (@instance, @storeOptions, storeInstanceOptions) ->
    @clientId = @instance._randomIndex()
    parser.load storeInstanceOptions, storeInstanceOptions, @
    @_nextRequest = @_lastReservoirRefresh = @_lastReservoirIncrease = Date.now()
    @_running = 0
    @_done = 0
    @_unblockTime = 0
    @ready = @Promise.resolve()
    @clients = {}
    @_startHeartbeat()

  _startHeartbeat: ->
    if !@heartbeat? and ((
      @storeOptions.reservoirRefreshInterval? and @storeOptions.reservoirRefreshAmount?
    ) or (
      @storeOptions.reservoirIncreaseInterval? and @storeOptions.reservoirIncreaseAmount?
    ))
      (@heartbeat = setInterval =>
        now = Date.now()

        if @storeOptions.reservoirRefreshInterval? and now >= @_lastReservoirRefresh + @storeOptions.reservoirRefreshInterval
          @_lastReservoirRefresh = now
          @storeOptions.reservoir = @storeOptions.reservoirRefreshAmount
          @instance._drainAll @computeCapacity()

        if @storeOptions.reservoirIncreaseInterval? and now >= @_lastReservoirIncrease + @storeOptions.reservoirIncreaseInterval
          { reservoirIncreaseAmount: amount, reservoirIncreaseMaximum: maximum, reservoir } = @storeOptions
          @_lastReservoirIncrease = now
          incr = if maximum? then Math.min amount, maximum - reservoir else amount
          if incr > 0
            @storeOptions.reservoir += incr
            @instance._drainAll @computeCapacity()

      , @heartbeatInterval).unref?()
    else clearInterval @heartbeat

  __publish__: (message) ->
    await @yieldLoop()
    @instance.Events.trigger "message", message.toString()

  __disconnect__: (flush) ->
    await @yieldLoop()
    clearInterval @heartbeat
    @Promise.resolve()

  yieldLoop: (t=0) -> new @Promise (resolve, reject) -> setTimeout resolve, t

  computePenalty: -> @storeOptions.penalty ? ((15 * @storeOptions.minTime) or 5000)

  __updateSettings__: (options) ->
    await @yieldLoop()
    parser.overwrite options, options, @storeOptions
    @_startHeartbeat()
    @instance._drainAll @computeCapacity()
    true

  __running__: ->
    await @yieldLoop()
    @_running

  __queued__: ->
    await @yieldLoop()
    @instance.queued()

  __done__: ->
    await @yieldLoop()
    @_done

  __groupCheck__: (time) ->
    await @yieldLoop()
    (@_nextRequest + @timeout) < time

  computeCapacity: ->
    { maxConcurrent, reservoir } = @storeOptions
    if maxConcurrent? and reservoir? then Math.min((maxConcurrent - @_running), reservoir)
    else if maxConcurrent? then maxConcurrent - @_running
    else if reservoir? then reservoir
    else null

  conditionsCheck: (weight) ->
    capacity = @computeCapacity()
    not capacity? or weight <= capacity

  __incrementReservoir__: (incr) ->
    await @yieldLoop()
    reservoir = @storeOptions.reservoir += incr
    @instance._drainAll @computeCapacity()
    reservoir

  __currentReservoir__: ->
    await @yieldLoop()
    @storeOptions.reservoir

  isBlocked: (now) -> @_unblockTime >= now

  check: (weight, now) -> @conditionsCheck(weight) and (@_nextRequest - now) <= 0

  __check__: (weight) ->
    await @yieldLoop()
    now = Date.now()
    @check weight, now

  __register__: (index, weight, expiration) ->
    await @yieldLoop()
    now = Date.now()
    if @conditionsCheck weight
      @_running += weight
      if @storeOptions.reservoir? then @storeOptions.reservoir -= weight
      wait = Math.max @_nextRequest - now, 0
      @_nextRequest = now + wait + @storeOptions.minTime
      { success: true, wait, reservoir: @storeOptions.reservoir }
    else { success: false }

  strategyIsBlock: -> @storeOptions.strategy == 3

  __submit__: (queueLength, weight) ->
    await @yieldLoop()
    if @storeOptions.maxConcurrent? and weight > @storeOptions.maxConcurrent
      throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{@storeOptions.maxConcurrent}")
    now = Date.now()
    reachedHWM = @storeOptions.highWater? and queueLength == @storeOptions.highWater and not @check(weight, now)
    blocked = @strategyIsBlock() and (reachedHWM or @isBlocked now)
    if blocked
      @_unblockTime = now + @computePenalty()
      @_nextRequest = @_unblockTime + @storeOptions.minTime
      @instance._dropAllQueued()
    { reachedHWM, blocked, strategy: @storeOptions.strategy }

  __free__: (index, weight) ->
    await @yieldLoop()
    @_running -= weight
    @_done += weight
    @instance._drainAll @computeCapacity()
    { running: @_running }

module.exports = LocalDatastore
28 node_modules/bottleneck/src/Queues.coffee generated vendored Normal file
@ -0,0 +1,28 @@
DLList = require "./DLList"
Events = require "./Events"

class Queues

  constructor: (num_priorities) ->
    @Events = new Events @
    @_length = 0
    @_lists = for i in [1..num_priorities] then new DLList (=> @incr()), (=> @decr())

  incr: -> if @_length++ == 0 then @Events.trigger "leftzero"

  decr: -> if --@_length == 0 then @Events.trigger "zero"

  push: (job) -> @_lists[job.options.priority].push job

  queued: (priority) -> if priority? then @_lists[priority].length else @_length

  shiftAll: (fn) -> @_lists.forEach (list) -> list.forEachShift fn

  getFirst: (arr=@_lists) ->
    for list in arr
      return list if list.length > 0
    []

  shiftLastFrom: (priority) -> @getFirst(@_lists[priority..].reverse()).shift()

module.exports = Queues
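`getFirst` walks the per-priority lists in index order, so lower priority numbers drain first. A quick sketch with jobs faked as plain `{ options: { priority } }` objects (real jobs are `Job` instances):

```coffeescript
Queues = require "./Queues"

queues = new Queues 10
fakeJob = (priority) -> { options: { priority } }

queues.push fakeJob(5)
queues.push fakeJob(1)

console.log queues.queued()                            # 2
console.log queues.getFirst().shift().options.priority # 1: lowest index wins
```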
91 node_modules/bottleneck/src/RedisConnection.coffee generated vendored Normal file
@ -0,0 +1,91 @@
parser = require "./parser"
Events = require "./Events"
Scripts = require "./Scripts"

class RedisConnection
  datastore: "redis"
  defaults:
    Redis: null
    clientOptions: {}
    client: null
    Promise: Promise
    Events: null

  constructor: (options={}) ->
    parser.load options, @defaults, @
    @Redis ?= eval("require")("redis") # Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
    @Events ?= new Events @
    @terminated = false

    @client ?= @Redis.createClient @clientOptions
    @subscriber = @client.duplicate()
    @limiters = {}
    @shas = {}

    @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)]
    .then => @_loadScripts()
    .then => { @client, @subscriber }

  _setup: (client, sub) ->
    client.setMaxListeners 0
    new @Promise (resolve, reject) =>
      client.on "error", (e) => @Events.trigger "error", e
      if sub
        client.on "message", (channel, message) =>
          @limiters[channel]?._store.onMessage channel, message
      if client.ready then resolve()
      else client.once "ready", resolve

  _loadScript: (name) ->
    new @Promise (resolve, reject) =>
      payload = Scripts.payload name
      @client.multi([["script", "load", payload]]).exec (err, replies) =>
        if err? then return reject err
        @shas[name] = replies[0]
        resolve replies[0]

  _loadScripts: -> @Promise.all(Scripts.names.map (k) => @_loadScript k)

  __runCommand__: (cmd) ->
    await @ready
    new @Promise (resolve, reject) =>
      @client.multi([cmd]).exec_atomic (err, replies) ->
        if err? then reject(err) else resolve(replies[0])

  __addLimiter__: (instance) ->
    @Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
      new @Promise (resolve, reject) =>
        handler = (chan) =>
          if chan == channel
            @subscriber.removeListener "subscribe", handler
            @limiters[channel] = instance
            resolve()
        @subscriber.on "subscribe", handler
        @subscriber.subscribe channel

  __removeLimiter__: (instance) ->
    @Promise.all [instance.channel(), instance.channel_client()].map (channel) =>
      unless @terminated
        await new @Promise (resolve, reject) =>
          @subscriber.unsubscribe channel, (err, chan) ->
            if err? then return reject err
            if chan == channel then return resolve()
      delete @limiters[channel]

  __scriptArgs__: (name, id, args, cb) ->
    keys = Scripts.keys name, id
    [@shas[name], keys.length].concat keys, args, cb

  __scriptFn__: (name) ->
    @client.evalsha.bind(@client)

  disconnect: (flush=true) ->
    clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters
    @limiters = {}
    @terminated = true

    @client.end flush
    @subscriber.end flush
    @Promise.resolve()

module.exports = RedisConnection
158 node_modules/bottleneck/src/RedisDatastore.coffee generated vendored Normal file
@ -0,0 +1,158 @@
parser = require "./parser"
BottleneckError = require "./BottleneckError"
RedisConnection = require "./RedisConnection"
IORedisConnection = require "./IORedisConnection"

class RedisDatastore
  constructor: (@instance, @storeOptions, storeInstanceOptions) ->
    @originalId = @instance.id
    @clientId = @instance._randomIndex()
    parser.load storeInstanceOptions, storeInstanceOptions, @
    @clients = {}
    @capacityPriorityCounters = {}
    @sharedConnection = @connection?

    @connection ?= if @instance.datastore == "redis" then new RedisConnection { @Redis, @clientOptions, @Promise, Events: @instance.Events }
    else if @instance.datastore == "ioredis" then new IORedisConnection { @Redis, @clientOptions, @clusterNodes, @Promise, Events: @instance.Events }

    @instance.connection = @connection
    @instance.datastore = @connection.datastore

    @ready = @connection.ready
    .then (@clients) => @runScript "init", @prepareInitSettings @clearDatastore
    .then => @connection.__addLimiter__ @instance
    .then => @runScript "register_client", [@instance.queued()]
    .then =>
      (@heartbeat = setInterval =>
        @runScript "heartbeat", []
        .catch (e) => @instance.Events.trigger "error", e
      , @heartbeatInterval).unref?()
      @clients

  __publish__: (message) ->
    { client } = await @ready
    client.publish(@instance.channel(), "message:#{message.toString()}")

  onMessage: (channel, message) ->
    try
      pos = message.indexOf(":")
      [type, data] = [message.slice(0, pos), message.slice(pos+1)]
      if type == "capacity"
        await @instance._drainAll(if data.length > 0 then ~~data)
      else if type == "capacity-priority"
        [rawCapacity, priorityClient, counter] = data.split(":")
        capacity = if rawCapacity.length > 0 then ~~rawCapacity
        if priorityClient == @clientId
          drained = await @instance._drainAll(capacity)
          newCapacity = if capacity? then capacity - (drained or 0) else ""
          await @clients.client.publish(@instance.channel(), "capacity-priority:#{newCapacity}::#{counter}")
        else if priorityClient == ""
          clearTimeout @capacityPriorityCounters[counter]
          delete @capacityPriorityCounters[counter]
          @instance._drainAll(capacity)
        else
          @capacityPriorityCounters[counter] = setTimeout =>
            try
              delete @capacityPriorityCounters[counter]
              await @runScript "blacklist_client", [priorityClient]
              await @instance._drainAll(capacity)
            catch e then @instance.Events.trigger "error", e
          , 1000
      else if type == "message"
        @instance.Events.trigger "message", data
      else if type == "blocked"
        await @instance._dropAllQueued()
    catch e then @instance.Events.trigger "error", e

  __disconnect__: (flush) ->
    clearInterval @heartbeat
    if @sharedConnection
      @connection.__removeLimiter__ @instance
    else
      @connection.disconnect flush

  runScript: (name, args) ->
    await @ready unless name == "init" or name == "register_client"
    new @Promise (resolve, reject) =>
      all_args = [Date.now(), @clientId].concat args
      @instance.Events.trigger "debug", "Calling Redis script: #{name}.lua", all_args
      arr = @connection.__scriptArgs__ name, @originalId, all_args, (err, replies) ->
        if err? then return reject err
        return resolve replies
      @connection.__scriptFn__(name) arr...
    .catch (e) =>
      if e.message == "SETTINGS_KEY_NOT_FOUND"
        if name == "heartbeat" then @Promise.resolve()
        else
          @runScript("init", @prepareInitSettings(false))
          .then => @runScript(name, args)
      else if e.message == "UNKNOWN_CLIENT"
        @runScript("register_client", [@instance.queued()])
        .then => @runScript(name, args)
      else @Promise.reject e

  prepareArray: (arr) -> (if x? then x.toString() else "") for x in arr

  prepareObject: (obj) ->
    arr = []
    for k, v of obj then arr.push k, (if v? then v.toString() else "")
    arr

  prepareInitSettings: (clear) ->
    args = @prepareObject Object.assign({}, @storeOptions, {
      id: @originalId
      version: @instance.version
      groupTimeout: @timeout
      @clientTimeout
    })
    args.unshift (if clear then 1 else 0), @instance.version
    args

  convertBool: (b) -> !!b

  __updateSettings__: (options) ->
    await @runScript "update_settings", @prepareObject options
    parser.overwrite options, options, @storeOptions

  __running__: -> @runScript "running", []

  __queued__: -> @runScript "queued", []

  __done__: -> @runScript "done", []

  __groupCheck__: -> @convertBool await @runScript "group_check", []

  __incrementReservoir__: (incr) -> @runScript "increment_reservoir", [incr]

  __currentReservoir__: -> @runScript "current_reservoir", []

  __check__: (weight) -> @convertBool await @runScript "check", @prepareArray [weight]

  __register__: (index, weight, expiration) ->
    [success, wait, reservoir] = await @runScript "register", @prepareArray [index, weight, expiration]
    return {
      success: @convertBool(success),
      wait,
      reservoir
    }

  __submit__: (queueLength, weight) ->
    try
      [reachedHWM, blocked, strategy] = await @runScript "submit", @prepareArray [queueLength, weight]
      return {
        reachedHWM: @convertBool(reachedHWM),
        blocked: @convertBool(blocked),
        strategy
      }
    catch e
      if e.message.indexOf("OVERWEIGHT") == 0
        [overweight, weight, maxConcurrent] = e.message.split ":"
        throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{maxConcurrent}")
      else
        throw e

  __free__: (index, weight) ->
    running = await @runScript "free", @prepareArray [index]
    return { running }

module.exports = RedisDatastore
151 node_modules/bottleneck/src/Scripts.coffee generated vendored Normal file
@ -0,0 +1,151 @@
lua = require "./lua.json"

headers =
  refs: lua["refs.lua"]
  validate_keys: lua["validate_keys.lua"]
  validate_client: lua["validate_client.lua"]
  refresh_expiration: lua["refresh_expiration.lua"]
  process_tick: lua["process_tick.lua"]
  conditions_check: lua["conditions_check.lua"]
  get_time: lua["get_time.lua"]

exports.allKeys = (id) -> [
  ###
  HASH
  ###
  "b_#{id}_settings"

  ###
  HASH
  job index -> weight
  ###
  "b_#{id}_job_weights"

  ###
  ZSET
  job index -> expiration
  ###
  "b_#{id}_job_expirations"

  ###
  HASH
  job index -> client
  ###
  "b_#{id}_job_clients"

  ###
  ZSET
  client -> sum running
  ###
  "b_#{id}_client_running"

  ###
  HASH
  client -> num queued
  ###
  "b_#{id}_client_num_queued"

  ###
  ZSET
  client -> last job registered
  ###
  "b_#{id}_client_last_registered"

  ###
  ZSET
  client -> last seen
  ###
  "b_#{id}_client_last_seen"
]

templates =
  init:
    keys: exports.allKeys
    headers: ["process_tick"]
    refresh_expiration: true
    code: lua["init.lua"]
  group_check:
    keys: exports.allKeys
    headers: []
    refresh_expiration: false
    code: lua["group_check.lua"]
  register_client:
    keys: exports.allKeys
    headers: ["validate_keys"]
    refresh_expiration: false
    code: lua["register_client.lua"]
  blacklist_client:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client"]
    refresh_expiration: false
    code: lua["blacklist_client.lua"]
  heartbeat:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: false
    code: lua["heartbeat.lua"]
  update_settings:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: true
    code: lua["update_settings.lua"]
  running:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: false
    code: lua["running.lua"]
  queued:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client"]
    refresh_expiration: false
    code: lua["queued.lua"]
  done:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: false
    code: lua["done.lua"]
  check:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
    refresh_expiration: false
    code: lua["check.lua"]
  submit:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
    refresh_expiration: true
    code: lua["submit.lua"]
  register:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"]
    refresh_expiration: true
    code: lua["register.lua"]
  free:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: true
    code: lua["free.lua"]
  current_reservoir:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: false
    code: lua["current_reservoir.lua"]
  increment_reservoir:
    keys: exports.allKeys
    headers: ["validate_keys", "validate_client", "process_tick"]
    refresh_expiration: true
    code: lua["increment_reservoir.lua"]

exports.names = Object.keys templates

exports.keys = (name, id) ->
  templates[name].keys id

exports.payload = (name) ->
  template = templates[name]
  Array::concat(
    headers.refs,
    template.headers.map((h) -> headers[h]),
    (if template.refresh_expiration then headers.refresh_expiration else ""),
    template.code
  )
  .join("\n")
43 node_modules/bottleneck/src/States.coffee generated vendored Normal file
@ -0,0 +1,43 @@
BottleneckError = require "./BottleneckError"
class States
  constructor: (@status) ->
    @_jobs = {}
    @counts = @status.map(-> 0)

  next: (id) ->
    current = @_jobs[id]
    next = current + 1
    if current? and next < @status.length
      @counts[current]--
      @counts[next]++
      @_jobs[id]++
    else if current?
      @counts[current]--
      delete @_jobs[id]

  start: (id) ->
    initial = 0
    @_jobs[id] = initial
    @counts[initial]++

  remove: (id) ->
    current = @_jobs[id]
    if current?
      @counts[current]--
      delete @_jobs[id]
    current?

  jobStatus: (id) -> @status[@_jobs[id]] ? null

  statusJobs: (status) ->
    if status?
      pos = @status.indexOf status
      if pos < 0
        throw new BottleneckError "status must be one of #{@status.join ', '}"
      k for k,v of @_jobs when v == pos
    else
      Object.keys @_jobs

  statusCounts: -> @counts.reduce(((acc, v, i) => acc[@status[i]] = v; acc), {})

module.exports = States
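States is a counter-backed state machine: each job id moves forward through the status list and `counts` tracks the population of every state. A standalone sketch:

```coffeescript
States = require "./States"

states = new States ["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"]
states.start "job-1"                  # enters RECEIVED
states.next "job-1"                   # advances to QUEUED
console.log states.jobStatus "job-1"  # "QUEUED"
console.log states.statusCounts()     # { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 0 }
```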
28 node_modules/bottleneck/src/Sync.coffee generated vendored Normal file
@ -0,0 +1,28 @@
DLList = require "./DLList"
class Sync
  constructor: (@name, @Promise) ->
    @_running = 0
    @_queue = new DLList()
  isEmpty: -> @_queue.length == 0
  _tryToRun: ->
    if (@_running < 1) and @_queue.length > 0
      @_running++
      { task, args, resolve, reject } = @_queue.shift()
      cb = try
        returned = await task args...
        () -> resolve returned
      catch error
        () -> reject error
      @_running--
      @_tryToRun()
      cb()
  schedule: (task, args...) =>
    resolve = reject = null
    promise = new @Promise (_resolve, _reject) ->
      resolve = _resolve
      reject = _reject
    @_queue.push { task, args, resolve, reject }
    @_tryToRun()
    promise

module.exports = Sync
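Sync is a strict one-at-a-time scheduler; the limiter uses two instances as its submit and register locks. Tasks scheduled together still run sequentially, as in this sketch (the `delay` helper is illustrative):

```coffeescript
Sync = require "./Sync"

sync = new Sync "demo", Promise

# Illustrative helper: resolves with `value` after `ms`
delay = (ms, value) -> new Promise (resolve) -> setTimeout (-> resolve value), ms

# Runs strictly in scheduling order: "first" settles before "second" starts
sync.schedule(delay, 50, "first").then console.log
sync.schedule(delay, 10, "second").then console.log
```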
3 node_modules/bottleneck/src/es5.coffee generated vendored Normal file
@ -0,0 +1,3 @@
require("regenerator-runtime/runtime")

module.exports = require "./Bottleneck"
1 node_modules/bottleneck/src/index.coffee generated vendored Normal file
@ -0,0 +1 @@
module.exports = require "./Bottleneck"
10 node_modules/bottleneck/src/parser.coffee generated vendored Normal file
@ -0,0 +1,10 @@
exports.load = (received, defaults, onto={}) ->
  for k, v of defaults
    onto[k] = received[k] ? v
  onto

exports.overwrite = (received, defaults, onto={}) ->
  for k, v of received
    if defaults[k] != undefined
      onto[k] = v
  onto
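The two helpers differ in direction: `load` copies every key in `defaults` onto `onto`, preferring received values, while `overwrite` copies only the received keys that also exist in `defaults`. A quick sketch:

```coffeescript
parser = require "./parser"

defaults = { minTime: 0, maxConcurrent: null }

console.log parser.load { minTime: 100 }, defaults
# { minTime: 100, maxConcurrent: null }

console.log parser.overwrite { minTime: 100, bogus: true }, defaults
# { minTime: 100 } -- unknown keys are ignored
```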
8 node_modules/bottleneck/src/redis/blacklist_client.lua generated vendored Normal file
@ -0,0 +1,8 @@
local blacklist = ARGV[num_static_argv + 1]

if redis.call('zscore', client_last_seen_key, blacklist) then
  redis.call('zadd', client_last_seen_key, 0, blacklist)
end


return {}
6 node_modules/bottleneck/src/redis/check.lua generated vendored Normal file
@ -0,0 +1,6 @@
local weight = tonumber(ARGV[num_static_argv + 1])

local capacity = process_tick(now, false)['capacity']
local nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))

return conditions_check(capacity, weight) and nextRequest - now <= 0
3 node_modules/bottleneck/src/redis/conditions_check.lua generated vendored Normal file
@ -0,0 +1,3 @@
local conditions_check = function (capacity, weight)
  return capacity == nil or weight <= capacity
end
1 node_modules/bottleneck/src/redis/current_reservoir.lua generated vendored Normal file
@ -0,0 +1 @@
return process_tick(now, false)['reservoir']
3 node_modules/bottleneck/src/redis/done.lua generated vendored Normal file
@ -0,0 +1,3 @@
process_tick(now, false)

return tonumber(redis.call('hget', settings_key, 'done'))
5 node_modules/bottleneck/src/redis/free.lua generated vendored Normal file
@ -0,0 +1,5 @@
local index = ARGV[num_static_argv + 1]

redis.call('zadd', job_expirations_key, 0, index)

return process_tick(now, false)['running']
7 node_modules/bottleneck/src/redis/get_time.lua generated vendored Normal file
@ -0,0 +1,7 @@
redis.replicate_commands()

local get_time = function ()
  local time = redis.call('time')

  return tonumber(time[1]..string.sub(time[2], 1, 3))
end
1 node_modules/bottleneck/src/redis/group_check.lua generated vendored Normal file
@ -0,0 +1 @@
return not (redis.call('exists', settings_key) == 1)
1 node_modules/bottleneck/src/redis/heartbeat.lua generated vendored Normal file
@ -0,0 +1 @@
process_tick(now, true)
10 node_modules/bottleneck/src/redis/increment_reservoir.lua generated vendored Normal file
@ -0,0 +1,10 @@
local incr = tonumber(ARGV[num_static_argv + 1])

redis.call('hincrby', settings_key, 'reservoir', incr)

local reservoir = process_tick(now, true)['reservoir']

local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)

return reservoir
105 node_modules/bottleneck/src/redis/init.lua generated vendored Normal file
@ -0,0 +1,105 @@
local clear = tonumber(ARGV[num_static_argv + 1])
local limiter_version = ARGV[num_static_argv + 2]
local num_local_argv = num_static_argv + 2

if clear == 1 then
  redis.call('del', unpack(KEYS))
end

if redis.call('exists', settings_key) == 0 then
  -- Create
  local args = {'hmset', settings_key}

  for i = num_local_argv + 1, #ARGV do
    table.insert(args, ARGV[i])
  end

  redis.call(unpack(args))
  redis.call('hmset', settings_key,
    'nextRequest', now,
    'lastReservoirRefresh', now,
    'lastReservoirIncrease', now,
    'running', 0,
    'done', 0,
    'unblockTime', 0,
    'capacityPriorityCounter', 0
  )

else
  -- Apply migrations
  local settings = redis.call('hmget', settings_key,
    'id',
    'version'
  )
  local id = settings[1]
  local current_version = settings[2]

  if current_version ~= limiter_version then
    local version_digits = {}
    for k, v in string.gmatch(current_version, "([^.]+)") do
      table.insert(version_digits, tonumber(k))
    end

    -- 2.10.0
    if version_digits[2] < 10 then
      redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')
      redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')
      redis.call('hsetnx', settings_key, 'done', 0)
      redis.call('hset', settings_key, 'version', '2.10.0')
    end

    -- 2.11.1
    if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then
      if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then
        redis.call('hmset', settings_key,
          'lastReservoirRefresh', now,
          'version', '2.11.1'
        )
      end
    end

    -- 2.14.0
    if version_digits[2] < 14 then
      local old_running_key = 'b_'..id..'_running'
      local old_executing_key = 'b_'..id..'_executing'

      if redis.call('exists', old_running_key) == 1 then
        redis.call('rename', old_running_key, job_weights_key)
      end
      if redis.call('exists', old_executing_key) == 1 then
        redis.call('rename', old_executing_key, job_expirations_key)
      end
      redis.call('hset', settings_key, 'version', '2.14.0')
    end

    -- 2.15.2
    if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then
      redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)
      redis.call('hset', settings_key, 'version', '2.15.2')
    end

    -- 2.17.0
    if version_digits[2] < 17 then
      redis.call('hsetnx', settings_key, 'clientTimeout', 10000)
      redis.call('hset', settings_key, 'version', '2.17.0')
    end

    -- 2.18.0
    if version_digits[2] < 18 then
      redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')
      redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')
      redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)
      redis.call('hset', settings_key, 'version', '2.18.0')
    end

  end

  process_tick(now, false)
end

local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))
refresh_expiration(0, 0, groupTimeout)

return {}
214
node_modules/bottleneck/src/redis/process_tick.lua
generated
vendored
Normal file
214
node_modules/bottleneck/src/redis/process_tick.lua
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
local process_tick = function (now, always_publish)
|
||||
|
||||
local compute_capacity = function (maxConcurrent, running, reservoir)
|
||||
if maxConcurrent ~= nil and reservoir ~= nil then
|
||||
return math.min((maxConcurrent - running), reservoir)
|
||||
elseif maxConcurrent ~= nil then
|
||||
return maxConcurrent - running
|
||||
elseif reservoir ~= nil then
|
||||
return reservoir
|
||||
else
|
||||
return nil
|
||||
end
|
||||
end
|
||||
|
||||
local settings = redis.call('hmget', settings_key,
|
||||
'id',
|
||||
'maxConcurrent',
|
||||
'running',
|
||||
'reservoir',
|
||||
'reservoirRefreshInterval',
|
||||
'reservoirRefreshAmount',
|
||||
'lastReservoirRefresh',
|
||||
'reservoirIncreaseInterval',
|
||||
'reservoirIncreaseAmount',
|
||||
'reservoirIncreaseMaximum',
|
||||
'lastReservoirIncrease',
|
||||
'capacityPriorityCounter',
|
||||
'clientTimeout'
|
||||
)
|
||||
local id = settings[1]
|
||||
local maxConcurrent = tonumber(settings[2])
|
||||
local running = tonumber(settings[3])
|
||||
local reservoir = tonumber(settings[4])
|
||||
local reservoirRefreshInterval = tonumber(settings[5])
|
||||
local reservoirRefreshAmount = tonumber(settings[6])
|
||||
local lastReservoirRefresh = tonumber(settings[7])
|
||||
local reservoirIncreaseInterval = tonumber(settings[8])
|
||||
local reservoirIncreaseAmount = tonumber(settings[9])
|
||||
local reservoirIncreaseMaximum = tonumber(settings[10])
|
||||
local lastReservoirIncrease = tonumber(settings[11])
|
||||
local capacityPriorityCounter = tonumber(settings[12])
|
||||
local clientTimeout = tonumber(settings[13])
|
||||
|
||||
local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)
|
||||
|
||||
--
|
||||
-- Process 'running' changes
|
||||
--
|
||||
local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)
|
||||
|
||||
if #expired > 0 then
|
||||
redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)
|
||||
|
||||
local flush_batch = function (batch, acc)
|
||||
local weights = redis.call('hmget', job_weights_key, unpack(batch))
|
||||
redis.call('hdel', job_weights_key, unpack(batch))
|
||||
local clients = redis.call('hmget', job_clients_key, unpack(batch))
|
||||
redis.call('hdel', job_clients_key, unpack(batch))
|
||||
|
||||
-- Calculate sum of removed weights
|
||||
for i = 1, #weights do
|
||||
acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)
|
||||
end
|
||||
|
||||
-- Calculate sum of removed weights by client
|
||||
local client_weights = {}
|
||||
for i = 1, #clients do
|
||||
local removed = tonumber(weights[i]) or 0
|
||||
if removed > 0 then
|
||||
acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
local acc = {
|
||||
['total'] = 0,
|
||||
['client_weights'] = {}
|
||||
}
|
||||
local batch_size = 1000
|
||||
|
||||
-- Compute changes to Zsets and apply changes to Hashes
|
||||
for i = 1, #expired, batch_size do
|
||||
local batch = {}
|
||||
for j = i, math.min(i + batch_size - 1, #expired) do
|
||||
table.insert(batch, expired[j])
|
||||
end
|
||||
|
||||
flush_batch(batch, acc)
|
||||
end
|
||||
|
||||
-- Apply changes to Zsets
|
||||
if acc['total'] > 0 then
|
||||
redis.call('hincrby', settings_key, 'done', acc['total'])
|
||||
      running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))
    end

    for client, weight in pairs(acc['client_weights']) do
      redis.call('zincrby', client_running_key, -weight, client)
    end
  end

  --
  -- Process 'reservoir' changes
  --
  local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil
  if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then
    reservoir = reservoirRefreshAmount
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirRefresh', now
    )
  end

  local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil
  if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then
    local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)
    local incr = reservoirIncreaseAmount * num_intervals
    if reservoirIncreaseMaximum ~= nil then
      incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))
    end
    if incr > 0 then
      reservoir = (reservoir or 0) + incr
    end
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)
    )
  end

  --
  -- Clear unresponsive clients
  --
  local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))
  local unresponsive_lookup = {}
  local terminated_clients = {}
  for i = 1, #unresponsive do
    unresponsive_lookup[unresponsive[i]] = true
    if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then
      table.insert(terminated_clients, unresponsive[i])
    end
  end
  if #terminated_clients > 0 then
    redis.call('zrem', client_running_key, unpack(terminated_clients))
    redis.call('hdel', client_num_queued_key, unpack(terminated_clients))
    redis.call('zrem', client_last_registered_key, unpack(terminated_clients))
    redis.call('zrem', client_last_seen_key, unpack(terminated_clients))
  end

  --
  -- Broadcast capacity changes
  --
  local final_capacity = compute_capacity(maxConcurrent, running, reservoir)

  if always_publish or (initial_capacity ~= nil and final_capacity == nil) then
    -- always_publish or was not unlimited, now unlimited
    redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))

  elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then
    -- capacity was increased
    -- send the capacity message to the limiter having the lowest number of running jobs
    -- the tiebreaker is the limiter having not registered a job in the longest time

    local lowest_concurrency_value = nil
    local lowest_concurrency_clients = {}
    local lowest_concurrency_last_registered = {}
    local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')

    for i = 1, #client_concurrencies, 2 do
      local client = client_concurrencies[i]
      local concurrency = tonumber(client_concurrencies[i+1])

      if (
        lowest_concurrency_value == nil or lowest_concurrency_value == concurrency
      ) and (
        not unresponsive_lookup[client]
      ) and (
        tonumber(redis.call('hget', client_num_queued_key, client)) > 0
      ) then
        lowest_concurrency_value = concurrency
        table.insert(lowest_concurrency_clients, client)
        local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))
        table.insert(lowest_concurrency_last_registered, last_registered)
      end
    end

    if #lowest_concurrency_clients > 0 then
      local position = 1
      local earliest = lowest_concurrency_last_registered[1]

      for i,v in ipairs(lowest_concurrency_last_registered) do
        if v < earliest then
          position = i
          earliest = v
        end
      end

      local next_client = lowest_concurrency_clients[position]
      redis.call('publish', 'b_'..id,
        'capacity-priority:'..(final_capacity or '')..
        ':'..next_client..
        ':'..capacityPriorityCounter
      )
      redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')
    else
      redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))
    end
  end

  return {
    ['capacity'] = final_capacity,
    ['running'] = running,
    ['reservoir'] = reservoir
  }
end
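Reading note: compute_capacity is referenced above but defined earlier in process_tick.lua, outside this hunk. A minimal sketch consistent with how it is called here, where a nil capacity means "unlimited" (a sketch only, not the vendored source):

-- Sketch: capacity is the tighter of the concurrency headroom and the
-- reservoir; nil signals unlimited capacity.
local compute_capacity = function (maxConcurrent, running, reservoir)
  if maxConcurrent ~= nil and reservoir ~= nil then
    return math.min(maxConcurrent - running, reservoir)
  elseif maxConcurrent ~= nil then
    return maxConcurrent - running
  elseif reservoir ~= nil then
    return reservoir
  else
    return nil
  end
end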
10
node_modules/bottleneck/src/redis/queued.lua
generated
vendored
Normal file
@ -0,0 +1,10 @@
local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))
local valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')
local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))

local sum = 0
for i = 1, #client_queued do
  sum = sum + tonumber(client_queued[i])
end

return sum
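Reading note: if valid_clients were ever empty, unpack(valid_clients) would invoke hmget with no field arguments, which Redis rejects; in practice the calling client has just refreshed its own last-seen score, so the set is non-empty. A defensive variant, as a sketch only:

-- Sketch: same aggregation, tolerating an empty client set and nil counters.
local sum = 0
if #valid_clients > 0 then
  local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))
  for i = 1, #client_queued do
    sum = sum + (tonumber(client_queued[i]) or 0)
  end
end
return sum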
11
node_modules/bottleneck/src/redis/refresh_expiration.lua
generated
vendored
Normal file
@ -0,0 +1,11 @@
local refresh_expiration = function (now, nextRequest, groupTimeout)

  if groupTimeout ~= nil then
    local ttl = (nextRequest + groupTimeout) - now

    for i = 1, #KEYS do
      redis.call('pexpire', KEYS[i], ttl)
    end
  end

end
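Worked example with illustrative numbers (not from the source): if now = 1000, nextRequest = 1500 and groupTimeout = 30000, then ttl = (1500 + 30000) - 1000 = 30500, so every key in KEYS is set to expire 30.5 seconds out; the group's Redis state thus outlives its last scheduled request by the group timeout.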
13
node_modules/bottleneck/src/redis/refs.lua
generated
vendored
Normal file
@ -0,0 +1,13 @@
local settings_key = KEYS[1]
local job_weights_key = KEYS[2]
local job_expirations_key = KEYS[3]
local job_clients_key = KEYS[4]
local client_running_key = KEYS[5]
local client_num_queued_key = KEYS[6]
local client_last_registered_key = KEYS[7]
local client_last_seen_key = KEYS[8]

local now = tonumber(ARGV[1])
local client = ARGV[2]

local num_static_argv = 2
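Reading note (an inference, not stated in this diff): refs.lua defines no behaviour of its own and reads like a shared prelude that binds the KEYS/ARGV layout every other script in this directory assumes. num_static_argv marks where script-specific arguments begin, which is why register.lua, submit.lua and register_client.lua all index ARGV relative to it:

-- Sketch of the apparent composition model:
-- prelude (refs.lua)     binds settings_key .. client_last_seen_key, now, client
-- action (e.g. submit)   then reads ARGV[num_static_argv + 1], ARGV[num_static_argv + 2], ...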
51
node_modules/bottleneck/src/redis/register.lua
generated
vendored
Normal file
@ -0,0 +1,51 @@
local index = ARGV[num_static_argv + 1]
local weight = tonumber(ARGV[num_static_argv + 2])
local expiration = tonumber(ARGV[num_static_argv + 3])

local state = process_tick(now, false)
local capacity = state['capacity']
local reservoir = state['reservoir']

local settings = redis.call('hmget', settings_key,
  'nextRequest',
  'minTime',
  'groupTimeout'
)
local nextRequest = tonumber(settings[1])
local minTime = tonumber(settings[2])
local groupTimeout = tonumber(settings[3])

if conditions_check(capacity, weight) then

  redis.call('hincrby', settings_key, 'running', weight)
  redis.call('hset', job_weights_key, index, weight)
  if expiration ~= nil then
    redis.call('zadd', job_expirations_key, now + expiration, index)
  end
  redis.call('hset', job_clients_key, index, client)
  redis.call('zincrby', client_running_key, weight, client)
  redis.call('hincrby', client_num_queued_key, client, -1)
  redis.call('zadd', client_last_registered_key, now, client)

  local wait = math.max(nextRequest - now, 0)
  local newNextRequest = now + wait + minTime

  if reservoir == nil then
    redis.call('hset', settings_key,
      'nextRequest', newNextRequest
    )
  else
    reservoir = reservoir - weight
    redis.call('hmset', settings_key,
      'reservoir', reservoir,
      'nextRequest', newNextRequest
    )
  end

  refresh_expiration(now, newNextRequest, groupTimeout)

  return {true, wait, reservoir}

else
  return {false}
end
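Reading note: conditions_check is used here and in submit.lua but defined outside this hunk. A minimal sketch consistent with both call sites, where a nil capacity again means "unlimited" (sketch only, not the vendored source):

-- Sketch: a job fits if capacity is unlimited (nil) or at least its weight.
local conditions_check = function (capacity, weight)
  return capacity == nil or weight <= capacity
end

On success the script returns {true, wait, reservoir}, where wait is how many milliseconds the job should be held before it starts; on failure it returns {false}, leaving the caller to try again later, presumably when a capacity message arrives.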
12
node_modules/bottleneck/src/redis/register_client.lua
generated
vendored
Normal file
@ -0,0 +1,12 @@
local queued = tonumber(ARGV[num_static_argv + 1])

-- Could have been re-registered concurrently
if not redis.call('zscore', client_last_seen_key, client) then
  redis.call('zadd', client_running_key, 0, client)
  redis.call('hset', client_num_queued_key, client, queued)
  redis.call('zadd', client_last_registered_key, 0, client)
end

redis.call('zadd', client_last_seen_key, now, client)

return {}
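Taken together with the "Clear unresponsive clients" block in process_tick.lua, this gives every client a heartbeat: the final zadd refreshes the score that process_tick compares against now - clientTimeout. With illustrative numbers, if clientTimeout = 10000 and a client was last seen at t = 5000, it becomes eligible for cleanup at any tick with now >= 15000, provided its running weight is zero; if it reappears afterwards, the zscore guard above re-creates its counters from scratch rather than trusting stale ones.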
1
node_modules/bottleneck/src/redis/running.lua
generated
vendored
Normal file
@ -0,0 +1 @@
return process_tick(now, false)['running']
74
node_modules/bottleneck/src/redis/submit.lua
generated
vendored
Normal file
@ -0,0 +1,74 @@
local queueLength = tonumber(ARGV[num_static_argv + 1])
local weight = tonumber(ARGV[num_static_argv + 2])

local capacity = process_tick(now, false)['capacity']

local settings = redis.call('hmget', settings_key,
  'id',
  'maxConcurrent',
  'highWater',
  'nextRequest',
  'strategy',
  'unblockTime',
  'penalty',
  'minTime',
  'groupTimeout'
)
local id = settings[1]
local maxConcurrent = tonumber(settings[2])
local highWater = tonumber(settings[3])
local nextRequest = tonumber(settings[4])
local strategy = tonumber(settings[5])
local unblockTime = tonumber(settings[6])
local penalty = tonumber(settings[7])
local minTime = tonumber(settings[8])
local groupTimeout = tonumber(settings[9])

if maxConcurrent ~= nil and weight > maxConcurrent then
  return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)
end

local reachedHWM = (highWater ~= nil and queueLength == highWater
  and not (
    conditions_check(capacity, weight)
    and nextRequest - now <= 0
  )
)

local blocked = strategy == 3 and (reachedHWM or unblockTime >= now)

if blocked then
  local computedPenalty = penalty
  if computedPenalty == nil then
    if minTime == 0 then
      computedPenalty = 5000
    else
      computedPenalty = 15 * minTime
    end
  end

  local newNextRequest = now + computedPenalty + minTime

  redis.call('hmset', settings_key,
    'unblockTime', now + computedPenalty,
    'nextRequest', newNextRequest
  )

  local clients_queued_reset = redis.call('hkeys', client_num_queued_key)
  local queued_reset = {}
  for i = 1, #clients_queued_reset do
    table.insert(queued_reset, clients_queued_reset[i])
    table.insert(queued_reset, 0)
  end
  redis.call('hmset', client_num_queued_key, unpack(queued_reset))

  redis.call('publish', 'b_'..id, 'blocked:')

  refresh_expiration(now, newNextRequest, groupTimeout)
end

if not blocked and not reachedHWM then
  redis.call('hincrby', client_num_queued_key, client, 1)
end

return {reachedHWM, blocked, strategy}
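Worked example of the blocking path, with illustrative numbers: with strategy = 3, penalty unset and minTime = 250, computedPenalty = 15 * 250 = 3750, so unblockTime becomes now + 3750 and nextRequest becomes now + 3750 + 250 = now + 4000; all per-client queued counters are reset to zero and a 'blocked:' message is published. With minTime = 0 the fallback penalty is a flat 5000 ms. In every case the script returns {reachedHWM, blocked, strategy}, so the caller can tell whether the job was refused at the high-water mark, dropped by the blocking strategy, or accepted into the queue.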
Some files were not shown because too many files have changed in this diff.