mirror of
https://github.com/xfarrow/blink
synced 2025-06-27 09:03:02 +02:00
Change endpoint from persons to people
This commit is contained in:
21
backend/apis/nodejs/node_modules/pg-pool/LICENSE
generated
vendored
Normal file
21
backend/apis/nodejs/node_modules/pg-pool/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 Brian M. Carlson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
376
backend/apis/nodejs/node_modules/pg-pool/README.md
generated
vendored
Normal file
376
backend/apis/nodejs/node_modules/pg-pool/README.md
generated
vendored
Normal file
@ -0,0 +1,376 @@
|
||||
# pg-pool
|
||||
[](https://travis-ci.org/brianc/node-pg-pool)
|
||||
|
||||
A connection pool for node-postgres
|
||||
|
||||
## install
|
||||
```sh
|
||||
npm i pg-pool pg
|
||||
```
|
||||
|
||||
## use
|
||||
|
||||
### create
|
||||
|
||||
to use pg-pool you must first create an instance of a pool
|
||||
|
||||
```js
|
||||
var Pool = require('pg-pool')
|
||||
|
||||
// by default the pool uses the same
|
||||
// configuration as whatever `pg` version you have installed
|
||||
var pool = new Pool()
|
||||
|
||||
// you can pass properties to the pool
|
||||
// these properties are passed unchanged to both the node-postgres Client constructor
|
||||
// and the node-pool (https://github.com/coopernurse/node-pool) constructor
|
||||
// allowing you to fully configure the behavior of both
|
||||
var pool2 = new Pool({
|
||||
database: 'postgres',
|
||||
user: 'brianc',
|
||||
password: 'secret!',
|
||||
port: 5432,
|
||||
ssl: true,
|
||||
max: 20, // set pool max size to 20
|
||||
idleTimeoutMillis: 1000, // close idle clients after 1 second
|
||||
connectionTimeoutMillis: 1000, // return an error after 1 second if connection could not be established
|
||||
maxUses: 7500, // close (and replace) a connection after it has been used 7500 times (see below for discussion)
|
||||
})
|
||||
|
||||
//you can supply a custom client constructor
|
||||
//if you want to use the native postgres client
|
||||
var NativeClient = require('pg').native.Client
|
||||
var nativePool = new Pool({ Client: NativeClient })
|
||||
|
||||
//you can even pool pg-native clients directly
|
||||
var PgNativeClient = require('pg-native')
|
||||
var pgNativePool = new Pool({ Client: PgNativeClient })
|
||||
```
|
||||
|
||||
##### Note:
|
||||
The Pool constructor does not support passing a Database URL as the parameter. To use pg-pool on heroku, for example, you need to parse the URL into a config object. Here is an example of how to parse a Database URL.
|
||||
|
||||
```js
|
||||
const Pool = require('pg-pool');
|
||||
const url = require('url')
|
||||
|
||||
const params = url.parse(process.env.DATABASE_URL);
|
||||
const auth = params.auth.split(':');
|
||||
|
||||
const config = {
|
||||
user: auth[0],
|
||||
password: auth[1],
|
||||
host: params.hostname,
|
||||
port: params.port,
|
||||
database: params.pathname.split('/')[1],
|
||||
ssl: true
|
||||
};
|
||||
|
||||
const pool = new Pool(config);
|
||||
|
||||
/*
|
||||
Transforms, 'postgres://DBuser:secret@DBHost:#####/myDB', into
|
||||
config = {
|
||||
user: 'DBuser',
|
||||
password: 'secret',
|
||||
host: 'DBHost',
|
||||
port: '#####',
|
||||
database: 'myDB',
|
||||
ssl: true
|
||||
}
|
||||
*/
|
||||
```
|
||||
|
||||
### acquire clients with a promise
|
||||
|
||||
pg-pool supports a fully promise-based api for acquiring clients
|
||||
|
||||
```js
|
||||
var pool = new Pool()
|
||||
pool.connect().then(client => {
|
||||
client.query('select $1::text as name', ['pg-pool']).then(res => {
|
||||
client.release()
|
||||
console.log('hello from', res.rows[0].name)
|
||||
})
|
||||
.catch(e => {
|
||||
client.release()
|
||||
console.error('query error', e.message, e.stack)
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
### plays nice with async/await
|
||||
|
||||
this ends up looking much nicer if you're using [co](https://github.com/tj/co) or async/await:
|
||||
|
||||
```js
|
||||
// with async/await
|
||||
(async () => {
|
||||
var pool = new Pool()
|
||||
var client = await pool.connect()
|
||||
try {
|
||||
var result = await client.query('select $1::text as name', ['brianc'])
|
||||
console.log('hello from', result.rows[0])
|
||||
} finally {
|
||||
client.release()
|
||||
}
|
||||
})().catch(e => console.error(e.message, e.stack))
|
||||
|
||||
// with co
|
||||
co(function * () {
|
||||
var client = yield pool.connect()
|
||||
try {
|
||||
var result = yield client.query('select $1::text as name', ['brianc'])
|
||||
console.log('hello from', result.rows[0])
|
||||
} finally {
|
||||
client.release()
|
||||
}
|
||||
}).catch(e => console.error(e.message, e.stack))
|
||||
```
|
||||
|
||||
### your new favorite helper method
|
||||
|
||||
because its so common to just run a query and return the client to the pool afterward pg-pool has this built-in:
|
||||
|
||||
```js
|
||||
var pool = new Pool()
|
||||
var time = await pool.query('SELECT NOW()')
|
||||
var name = await pool.query('select $1::text as name', ['brianc'])
|
||||
console.log(name.rows[0].name, 'says hello at', time.rows[0].now)
|
||||
```
|
||||
|
||||
you can also use a callback here if you'd like:
|
||||
|
||||
```js
|
||||
var pool = new Pool()
|
||||
pool.query('SELECT $1::text as name', ['brianc'], function (err, res) {
|
||||
console.log(res.rows[0].name) // brianc
|
||||
})
|
||||
```
|
||||
|
||||
__pro tip:__ unless you need to run a transaction (which requires a single client for multiple queries) or you
|
||||
have some other edge case like [streaming rows](https://github.com/brianc/node-pg-query-stream) or using a [cursor](https://github.com/brianc/node-pg-cursor)
|
||||
you should almost always just use `pool.query`. Its easy, it does the right thing :tm:, and wont ever forget to return
|
||||
clients back to the pool after the query is done.
|
||||
|
||||
### drop-in backwards compatible
|
||||
|
||||
pg-pool still and will always support the traditional callback api for acquiring a client. This is the exact API node-postgres has shipped with for years:
|
||||
|
||||
```js
|
||||
var pool = new Pool()
|
||||
pool.connect((err, client, done) => {
|
||||
if (err) return done(err)
|
||||
|
||||
client.query('SELECT $1::text as name', ['pg-pool'], (err, res) => {
|
||||
done()
|
||||
if (err) {
|
||||
return console.error('query error', err.message, err.stack)
|
||||
}
|
||||
console.log('hello from', res.rows[0].name)
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
### shut it down
|
||||
|
||||
When you are finished with the pool if all the clients are idle the pool will close them after `config.idleTimeoutMillis` and your app
|
||||
will shutdown gracefully. If you don't want to wait for the timeout you can end the pool as follows:
|
||||
|
||||
```js
|
||||
var pool = new Pool()
|
||||
var client = await pool.connect()
|
||||
console.log(await client.query('select now()'))
|
||||
client.release()
|
||||
await pool.end()
|
||||
```
|
||||
|
||||
### a note on instances
|
||||
|
||||
The pool should be a __long-lived object__ in your application. Generally you'll want to instantiate one pool when your app starts up and use the same instance of the pool throughout the lifetime of your application. If you are frequently creating a new pool within your code you likely don't have your pool initialization code in the correct place. Example:
|
||||
|
||||
```js
|
||||
// assume this is a file in your program at ./your-app/lib/db.js
|
||||
|
||||
// correct usage: create the pool and let it live
|
||||
// 'globally' here, controlling access to it through exported methods
|
||||
var pool = new pg.Pool()
|
||||
|
||||
// this is the right way to export the query method
|
||||
module.exports.query = (text, values) => {
|
||||
console.log('query:', text, values)
|
||||
return pool.query(text, values)
|
||||
}
|
||||
|
||||
// this would be the WRONG way to export the connect method
|
||||
module.exports.connect = () => {
|
||||
// notice how we would be creating a pool instance here
|
||||
// every time we called 'connect' to get a new client?
|
||||
// that's a bad thing & results in creating an unbounded
|
||||
// number of pools & therefore connections
|
||||
var aPool = new pg.Pool()
|
||||
return aPool.connect()
|
||||
}
|
||||
```
|
||||
|
||||
### events
|
||||
|
||||
Every instance of a `Pool` is an event emitter. These instances emit the following events:
|
||||
|
||||
#### error
|
||||
|
||||
Emitted whenever an idle client in the pool encounters an error. This is common when your PostgreSQL server shuts down, reboots, or a network partition otherwise causes it to become unavailable while your pool has connected clients.
|
||||
|
||||
Example:
|
||||
|
||||
```js
|
||||
const Pool = require('pg-pool')
|
||||
const pool = new Pool()
|
||||
|
||||
// attach an error handler to the pool for when a connected, idle client
|
||||
// receives an error by being disconnected, etc
|
||||
pool.on('error', function(error, client) {
|
||||
// handle this in the same way you would treat process.on('uncaughtException')
|
||||
// it is supplied the error as well as the idle client which received the error
|
||||
})
|
||||
```
|
||||
|
||||
#### connect
|
||||
|
||||
Fired whenever the pool creates a __new__ `pg.Client` instance and successfully connects it to the backend.
|
||||
|
||||
Example:
|
||||
|
||||
```js
|
||||
const Pool = require('pg-pool')
|
||||
const pool = new Pool()
|
||||
|
||||
var count = 0
|
||||
|
||||
pool.on('connect', client => {
|
||||
client.count = count++
|
||||
})
|
||||
|
||||
pool
|
||||
.connect()
|
||||
.then(client => {
|
||||
return client
|
||||
.query('SELECT $1::int AS "clientCount"', [client.count])
|
||||
.then(res => console.log(res.rows[0].clientCount)) // outputs 0
|
||||
.then(() => client)
|
||||
})
|
||||
.then(client => client.release())
|
||||
|
||||
```
|
||||
|
||||
#### acquire
|
||||
|
||||
Fired whenever a client is acquired from the pool
|
||||
|
||||
Example:
|
||||
|
||||
This allows you to count the number of clients which have ever been acquired from the pool.
|
||||
|
||||
```js
|
||||
var Pool = require('pg-pool')
|
||||
var pool = new Pool()
|
||||
|
||||
var acquireCount = 0
|
||||
pool.on('acquire', function (client) {
|
||||
acquireCount++
|
||||
})
|
||||
|
||||
var connectCount = 0
|
||||
pool.on('connect', function () {
|
||||
connectCount++
|
||||
})
|
||||
|
||||
for (var i = 0; i < 200; i++) {
|
||||
pool.query('SELECT NOW()')
|
||||
}
|
||||
|
||||
setTimeout(function () {
|
||||
console.log('connect count:', connectCount) // output: connect count: 10
|
||||
console.log('acquire count:', acquireCount) // output: acquire count: 200
|
||||
}, 100)
|
||||
|
||||
```
|
||||
|
||||
### environment variables
|
||||
|
||||
pg-pool & node-postgres support some of the same environment variables as `psql` supports. The most common are:
|
||||
|
||||
```
|
||||
PGDATABASE=my_db
|
||||
PGUSER=username
|
||||
PGPASSWORD="my awesome password"
|
||||
PGPORT=5432
|
||||
PGSSLMODE=require
|
||||
```
|
||||
|
||||
Usually I will export these into my local environment via a `.env` file with environment settings or export them in `~/.bash_profile` or something similar. This way I get configurability which works with both the postgres suite of tools (`psql`, `pg_dump`, `pg_restore`) and node, I can vary the environment variables locally and in production, and it supports the concept of a [12-factor app](http://12factor.net/) out of the box.
|
||||
|
||||
## bring your own promise
|
||||
|
||||
In versions of node `<=0.12.x` there is no native promise implementation available globally. You can polyfill the promise globally like this:
|
||||
|
||||
```js
|
||||
// first run `npm install promise-polyfill --save
|
||||
if (typeof Promise == 'undefined') {
|
||||
global.Promise = require('promise-polyfill')
|
||||
}
|
||||
```
|
||||
|
||||
You can use any other promise implementation you'd like. The pool also allows you to configure the promise implementation on a per-pool level:
|
||||
|
||||
```js
|
||||
var bluebirdPool = new Pool({
|
||||
Promise: require('bluebird')
|
||||
})
|
||||
```
|
||||
|
||||
__please note:__ in node `<=0.12.x` the pool will throw if you do not provide a promise constructor in one of the two ways mentioned above. In node `>=4.0.0` the pool will use the native promise implementation by default; however, the two methods above still allow you to "bring your own."
|
||||
|
||||
## maxUses and read-replica autoscaling (e.g. AWS Aurora)
|
||||
|
||||
The maxUses config option can help an application instance rebalance load against a replica set that has been auto-scaled after the connection pool is already full of healthy connections.
|
||||
|
||||
The mechanism here is that a connection is considered "expended" after it has been acquired and released `maxUses` number of times. Depending on the load on your system, this means there will be an approximate time in which any given connection will live, thus creating a window for rebalancing.
|
||||
|
||||
Imagine a scenario where you have 10 app instances providing an API running against a replica cluster of 3 that are accessed via a round-robin DNS entry. Each instance runs a connection pool size of 20. With an ambient load of 50 requests per second, the connection pool will likely fill up in a few minutes with healthy connections.
|
||||
|
||||
If you have weekly bursts of traffic which peak at 1,000 requests per second, you might want to grow your replicas to 10 during this period. Without setting `maxUses`, the new replicas will not be adopted by the app servers without an intervention -- namely, restarting each in turn in order to build up new connection pools that are balanced against all the replicas. Adding additional app server instances will help to some extent because they will adopt all the replicas in an even way, but the initial app servers will continue to focus additional load on the original replicas.
|
||||
|
||||
This is where the `maxUses` configuration option comes into play. Setting `maxUses` to 7500 will ensure that over a period of 30 minutes or so the new replicas will be adopted as the pre-existing connections are closed and replaced with new ones, thus creating a window for eventual balance.
|
||||
|
||||
You'll want to test based on your own scenarios, but one way to make a first guess at `maxUses` is to identify an acceptable window for rebalancing and then solve for the value:
|
||||
|
||||
```
|
||||
maxUses = rebalanceWindowSeconds * totalRequestsPerSecond / numAppInstances / poolSize
|
||||
```
|
||||
|
||||
In the example above, assuming we acquire and release 1 connection per request and we are aiming for a 30 minute rebalancing window:
|
||||
|
||||
```
|
||||
maxUses = rebalanceWindowSeconds * totalRequestsPerSecond / numAppInstances / poolSize
|
||||
7200 = 1800 * 1000 / 10 / 25
|
||||
```
|
||||
|
||||
## tests
|
||||
|
||||
To run tests clone the repo, `npm i` in the working dir, and then run `npm test`
|
||||
|
||||
## contributions
|
||||
|
||||
I love contributions. Please make sure they have tests, and submit a PR. If you're not sure if the issue is worth it or will be accepted it never hurts to open an issue to begin the conversation. If you're interested in keeping up with node-postgres releated stuff, you can follow me on twitter at [@briancarlson](https://twitter.com/briancarlson) - I generally announce any noteworthy updates there.
|
||||
|
||||
## license
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2016 Brian M. Carlson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
471
backend/apis/nodejs/node_modules/pg-pool/index.js
generated
vendored
Normal file
471
backend/apis/nodejs/node_modules/pg-pool/index.js
generated
vendored
Normal file
@ -0,0 +1,471 @@
|
||||
'use strict'
|
||||
const EventEmitter = require('events').EventEmitter
|
||||
|
||||
const NOOP = function () {}
|
||||
|
||||
const removeWhere = (list, predicate) => {
|
||||
const i = list.findIndex(predicate)
|
||||
|
||||
return i === -1 ? undefined : list.splice(i, 1)[0]
|
||||
}
|
||||
|
||||
class IdleItem {
|
||||
constructor(client, idleListener, timeoutId) {
|
||||
this.client = client
|
||||
this.idleListener = idleListener
|
||||
this.timeoutId = timeoutId
|
||||
}
|
||||
}
|
||||
|
||||
class PendingItem {
|
||||
constructor(callback) {
|
||||
this.callback = callback
|
||||
}
|
||||
}
|
||||
|
||||
function throwOnDoubleRelease() {
|
||||
throw new Error('Release called on client which has already been released to the pool.')
|
||||
}
|
||||
|
||||
function promisify(Promise, callback) {
|
||||
if (callback) {
|
||||
return { callback: callback, result: undefined }
|
||||
}
|
||||
let rej
|
||||
let res
|
||||
const cb = function (err, client) {
|
||||
err ? rej(err) : res(client)
|
||||
}
|
||||
const result = new Promise(function (resolve, reject) {
|
||||
res = resolve
|
||||
rej = reject
|
||||
}).catch((err) => {
|
||||
// replace the stack trace that leads to `TCP.onStreamRead` with one that leads back to the
|
||||
// application that created the query
|
||||
Error.captureStackTrace(err)
|
||||
throw err
|
||||
})
|
||||
return { callback: cb, result: result }
|
||||
}
|
||||
|
||||
function makeIdleListener(pool, client) {
|
||||
return function idleListener(err) {
|
||||
err.client = client
|
||||
|
||||
client.removeListener('error', idleListener)
|
||||
client.on('error', () => {
|
||||
pool.log('additional client error after disconnection due to error', err)
|
||||
})
|
||||
pool._remove(client)
|
||||
// TODO - document that once the pool emits an error
|
||||
// the client has already been closed & purged and is unusable
|
||||
pool.emit('error', err, client)
|
||||
}
|
||||
}
|
||||
|
||||
class Pool extends EventEmitter {
|
||||
constructor(options, Client) {
|
||||
super()
|
||||
this.options = Object.assign({}, options)
|
||||
|
||||
if (options != null && 'password' in options) {
|
||||
// "hiding" the password so it doesn't show up in stack traces
|
||||
// or if the client is console.logged
|
||||
Object.defineProperty(this.options, 'password', {
|
||||
configurable: true,
|
||||
enumerable: false,
|
||||
writable: true,
|
||||
value: options.password,
|
||||
})
|
||||
}
|
||||
if (options != null && options.ssl && options.ssl.key) {
|
||||
// "hiding" the ssl->key so it doesn't show up in stack traces
|
||||
// or if the client is console.logged
|
||||
Object.defineProperty(this.options.ssl, 'key', {
|
||||
enumerable: false,
|
||||
})
|
||||
}
|
||||
|
||||
this.options.max = this.options.max || this.options.poolSize || 10
|
||||
this.options.maxUses = this.options.maxUses || Infinity
|
||||
this.options.allowExitOnIdle = this.options.allowExitOnIdle || false
|
||||
this.options.maxLifetimeSeconds = this.options.maxLifetimeSeconds || 0
|
||||
this.log = this.options.log || function () {}
|
||||
this.Client = this.options.Client || Client || require('pg').Client
|
||||
this.Promise = this.options.Promise || global.Promise
|
||||
|
||||
if (typeof this.options.idleTimeoutMillis === 'undefined') {
|
||||
this.options.idleTimeoutMillis = 10000
|
||||
}
|
||||
|
||||
this._clients = []
|
||||
this._idle = []
|
||||
this._expired = new WeakSet()
|
||||
this._pendingQueue = []
|
||||
this._endCallback = undefined
|
||||
this.ending = false
|
||||
this.ended = false
|
||||
}
|
||||
|
||||
_isFull() {
|
||||
return this._clients.length >= this.options.max
|
||||
}
|
||||
|
||||
_pulseQueue() {
|
||||
this.log('pulse queue')
|
||||
if (this.ended) {
|
||||
this.log('pulse queue ended')
|
||||
return
|
||||
}
|
||||
if (this.ending) {
|
||||
this.log('pulse queue on ending')
|
||||
if (this._idle.length) {
|
||||
this._idle.slice().map((item) => {
|
||||
this._remove(item.client)
|
||||
})
|
||||
}
|
||||
if (!this._clients.length) {
|
||||
this.ended = true
|
||||
this._endCallback()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// if we don't have any waiting, do nothing
|
||||
if (!this._pendingQueue.length) {
|
||||
this.log('no queued requests')
|
||||
return
|
||||
}
|
||||
// if we don't have any idle clients and we have no more room do nothing
|
||||
if (!this._idle.length && this._isFull()) {
|
||||
return
|
||||
}
|
||||
const pendingItem = this._pendingQueue.shift()
|
||||
if (this._idle.length) {
|
||||
const idleItem = this._idle.pop()
|
||||
clearTimeout(idleItem.timeoutId)
|
||||
const client = idleItem.client
|
||||
client.ref && client.ref()
|
||||
const idleListener = idleItem.idleListener
|
||||
|
||||
return this._acquireClient(client, pendingItem, idleListener, false)
|
||||
}
|
||||
if (!this._isFull()) {
|
||||
return this.newClient(pendingItem)
|
||||
}
|
||||
throw new Error('unexpected condition')
|
||||
}
|
||||
|
||||
_remove(client) {
|
||||
const removed = removeWhere(this._idle, (item) => item.client === client)
|
||||
|
||||
if (removed !== undefined) {
|
||||
clearTimeout(removed.timeoutId)
|
||||
}
|
||||
|
||||
this._clients = this._clients.filter((c) => c !== client)
|
||||
client.end()
|
||||
this.emit('remove', client)
|
||||
}
|
||||
|
||||
connect(cb) {
|
||||
if (this.ending) {
|
||||
const err = new Error('Cannot use a pool after calling end on the pool')
|
||||
return cb ? cb(err) : this.Promise.reject(err)
|
||||
}
|
||||
|
||||
const response = promisify(this.Promise, cb)
|
||||
const result = response.result
|
||||
|
||||
// if we don't have to connect a new client, don't do so
|
||||
if (this._isFull() || this._idle.length) {
|
||||
// if we have idle clients schedule a pulse immediately
|
||||
if (this._idle.length) {
|
||||
process.nextTick(() => this._pulseQueue())
|
||||
}
|
||||
|
||||
if (!this.options.connectionTimeoutMillis) {
|
||||
this._pendingQueue.push(new PendingItem(response.callback))
|
||||
return result
|
||||
}
|
||||
|
||||
const queueCallback = (err, res, done) => {
|
||||
clearTimeout(tid)
|
||||
response.callback(err, res, done)
|
||||
}
|
||||
|
||||
const pendingItem = new PendingItem(queueCallback)
|
||||
|
||||
// set connection timeout on checking out an existing client
|
||||
const tid = setTimeout(() => {
|
||||
// remove the callback from pending waiters because
|
||||
// we're going to call it with a timeout error
|
||||
removeWhere(this._pendingQueue, (i) => i.callback === queueCallback)
|
||||
pendingItem.timedOut = true
|
||||
response.callback(new Error('timeout exceeded when trying to connect'))
|
||||
}, this.options.connectionTimeoutMillis)
|
||||
|
||||
if (tid.unref) {
|
||||
tid.unref()
|
||||
}
|
||||
|
||||
this._pendingQueue.push(pendingItem)
|
||||
return result
|
||||
}
|
||||
|
||||
this.newClient(new PendingItem(response.callback))
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
newClient(pendingItem) {
|
||||
const client = new this.Client(this.options)
|
||||
this._clients.push(client)
|
||||
const idleListener = makeIdleListener(this, client)
|
||||
|
||||
this.log('checking client timeout')
|
||||
|
||||
// connection timeout logic
|
||||
let tid
|
||||
let timeoutHit = false
|
||||
if (this.options.connectionTimeoutMillis) {
|
||||
tid = setTimeout(() => {
|
||||
this.log('ending client due to timeout')
|
||||
timeoutHit = true
|
||||
// force kill the node driver, and let libpq do its teardown
|
||||
client.connection ? client.connection.stream.destroy() : client.end()
|
||||
}, this.options.connectionTimeoutMillis)
|
||||
}
|
||||
|
||||
this.log('connecting new client')
|
||||
client.connect((err) => {
|
||||
if (tid) {
|
||||
clearTimeout(tid)
|
||||
}
|
||||
client.on('error', idleListener)
|
||||
if (err) {
|
||||
this.log('client failed to connect', err)
|
||||
// remove the dead client from our list of clients
|
||||
this._clients = this._clients.filter((c) => c !== client)
|
||||
if (timeoutHit) {
|
||||
err = new Error('Connection terminated due to connection timeout', { cause: err })
|
||||
}
|
||||
|
||||
// this client won’t be released, so move on immediately
|
||||
this._pulseQueue()
|
||||
|
||||
if (!pendingItem.timedOut) {
|
||||
pendingItem.callback(err, undefined, NOOP)
|
||||
}
|
||||
} else {
|
||||
this.log('new client connected')
|
||||
|
||||
if (this.options.maxLifetimeSeconds !== 0) {
|
||||
const maxLifetimeTimeout = setTimeout(() => {
|
||||
this.log('ending client due to expired lifetime')
|
||||
this._expired.add(client)
|
||||
const idleIndex = this._idle.findIndex((idleItem) => idleItem.client === client)
|
||||
if (idleIndex !== -1) {
|
||||
this._acquireClient(
|
||||
client,
|
||||
new PendingItem((err, client, clientRelease) => clientRelease()),
|
||||
idleListener,
|
||||
false
|
||||
)
|
||||
}
|
||||
}, this.options.maxLifetimeSeconds * 1000)
|
||||
|
||||
maxLifetimeTimeout.unref()
|
||||
client.once('end', () => clearTimeout(maxLifetimeTimeout))
|
||||
}
|
||||
|
||||
return this._acquireClient(client, pendingItem, idleListener, true)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// acquire a client for a pending work item
|
||||
_acquireClient(client, pendingItem, idleListener, isNew) {
|
||||
if (isNew) {
|
||||
this.emit('connect', client)
|
||||
}
|
||||
|
||||
this.emit('acquire', client)
|
||||
|
||||
client.release = this._releaseOnce(client, idleListener)
|
||||
|
||||
client.removeListener('error', idleListener)
|
||||
|
||||
if (!pendingItem.timedOut) {
|
||||
if (isNew && this.options.verify) {
|
||||
this.options.verify(client, (err) => {
|
||||
if (err) {
|
||||
client.release(err)
|
||||
return pendingItem.callback(err, undefined, NOOP)
|
||||
}
|
||||
|
||||
pendingItem.callback(undefined, client, client.release)
|
||||
})
|
||||
} else {
|
||||
pendingItem.callback(undefined, client, client.release)
|
||||
}
|
||||
} else {
|
||||
if (isNew && this.options.verify) {
|
||||
this.options.verify(client, client.release)
|
||||
} else {
|
||||
client.release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// returns a function that wraps _release and throws if called more than once
|
||||
_releaseOnce(client, idleListener) {
|
||||
let released = false
|
||||
|
||||
return (err) => {
|
||||
if (released) {
|
||||
throwOnDoubleRelease()
|
||||
}
|
||||
|
||||
released = true
|
||||
this._release(client, idleListener, err)
|
||||
}
|
||||
}
|
||||
|
||||
// release a client back to the poll, include an error
|
||||
// to remove it from the pool
|
||||
_release(client, idleListener, err) {
|
||||
client.on('error', idleListener)
|
||||
|
||||
client._poolUseCount = (client._poolUseCount || 0) + 1
|
||||
|
||||
this.emit('release', err, client)
|
||||
|
||||
// TODO(bmc): expose a proper, public interface _queryable and _ending
|
||||
if (err || this.ending || !client._queryable || client._ending || client._poolUseCount >= this.options.maxUses) {
|
||||
if (client._poolUseCount >= this.options.maxUses) {
|
||||
this.log('remove expended client')
|
||||
}
|
||||
this._remove(client)
|
||||
this._pulseQueue()
|
||||
return
|
||||
}
|
||||
|
||||
const isExpired = this._expired.has(client)
|
||||
if (isExpired) {
|
||||
this.log('remove expired client')
|
||||
this._expired.delete(client)
|
||||
this._remove(client)
|
||||
this._pulseQueue()
|
||||
return
|
||||
}
|
||||
|
||||
// idle timeout
|
||||
let tid
|
||||
if (this.options.idleTimeoutMillis) {
|
||||
tid = setTimeout(() => {
|
||||
this.log('remove idle client')
|
||||
this._remove(client)
|
||||
}, this.options.idleTimeoutMillis)
|
||||
|
||||
if (this.options.allowExitOnIdle) {
|
||||
// allow Node to exit if this is all that's left
|
||||
tid.unref()
|
||||
}
|
||||
}
|
||||
|
||||
if (this.options.allowExitOnIdle) {
|
||||
client.unref()
|
||||
}
|
||||
|
||||
this._idle.push(new IdleItem(client, idleListener, tid))
|
||||
this._pulseQueue()
|
||||
}
|
||||
|
||||
query(text, values, cb) {
|
||||
// guard clause against passing a function as the first parameter
|
||||
if (typeof text === 'function') {
|
||||
const response = promisify(this.Promise, text)
|
||||
setImmediate(function () {
|
||||
return response.callback(new Error('Passing a function as the first parameter to pool.query is not supported'))
|
||||
})
|
||||
return response.result
|
||||
}
|
||||
|
||||
// allow plain text query without values
|
||||
if (typeof values === 'function') {
|
||||
cb = values
|
||||
values = undefined
|
||||
}
|
||||
const response = promisify(this.Promise, cb)
|
||||
cb = response.callback
|
||||
|
||||
this.connect((err, client) => {
|
||||
if (err) {
|
||||
return cb(err)
|
||||
}
|
||||
|
||||
let clientReleased = false
|
||||
const onError = (err) => {
|
||||
if (clientReleased) {
|
||||
return
|
||||
}
|
||||
clientReleased = true
|
||||
client.release(err)
|
||||
cb(err)
|
||||
}
|
||||
|
||||
client.once('error', onError)
|
||||
this.log('dispatching query')
|
||||
try {
|
||||
client.query(text, values, (err, res) => {
|
||||
this.log('query dispatched')
|
||||
client.removeListener('error', onError)
|
||||
if (clientReleased) {
|
||||
return
|
||||
}
|
||||
clientReleased = true
|
||||
client.release(err)
|
||||
if (err) {
|
||||
return cb(err)
|
||||
}
|
||||
return cb(undefined, res)
|
||||
})
|
||||
} catch (err) {
|
||||
client.release(err)
|
||||
return cb(err)
|
||||
}
|
||||
})
|
||||
return response.result
|
||||
}
|
||||
|
||||
end(cb) {
|
||||
this.log('ending')
|
||||
if (this.ending) {
|
||||
const err = new Error('Called end on pool more than once')
|
||||
return cb ? cb(err) : this.Promise.reject(err)
|
||||
}
|
||||
this.ending = true
|
||||
const promised = promisify(this.Promise, cb)
|
||||
this._endCallback = promised.callback
|
||||
this._pulseQueue()
|
||||
return promised.result
|
||||
}
|
||||
|
||||
get waitingCount() {
|
||||
return this._pendingQueue.length
|
||||
}
|
||||
|
||||
get idleCount() {
|
||||
return this._idle.length
|
||||
}
|
||||
|
||||
get expiredCount() {
|
||||
return this._clients.reduce((acc, client) => acc + (this._expired.has(client) ? 1 : 0), 0)
|
||||
}
|
||||
|
||||
get totalCount() {
|
||||
return this._clients.length
|
||||
}
|
||||
}
|
||||
module.exports = Pool
|
41
backend/apis/nodejs/node_modules/pg-pool/package.json
generated
vendored
Normal file
41
backend/apis/nodejs/node_modules/pg-pool/package.json
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
{
|
||||
"name": "pg-pool",
|
||||
"version": "3.8.0",
|
||||
"description": "Connection pool for node-postgres",
|
||||
"main": "index.js",
|
||||
"directories": {
|
||||
"test": "test"
|
||||
},
|
||||
"scripts": {
|
||||
"test": " node_modules/.bin/mocha"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/brianc/node-postgres.git",
|
||||
"directory": "packages/pg-pool"
|
||||
},
|
||||
"keywords": [
|
||||
"pg",
|
||||
"postgres",
|
||||
"pool",
|
||||
"database"
|
||||
],
|
||||
"author": "Brian M. Carlson",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/brianc/node-pg-pool/issues"
|
||||
},
|
||||
"homepage": "https://github.com/brianc/node-pg-pool#readme",
|
||||
"devDependencies": {
|
||||
"bluebird": "3.7.2",
|
||||
"co": "4.6.0",
|
||||
"expect.js": "0.3.1",
|
||||
"lodash": "^4.17.11",
|
||||
"mocha": "^10.5.2",
|
||||
"pg-cursor": "^1.3.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"pg": ">=8.0"
|
||||
},
|
||||
"gitHead": "f7c92e487c6a9c9600585f9de14cb17e7a65e76e"
|
||||
}
|
42
backend/apis/nodejs/node_modules/pg-pool/test/bring-your-own-promise.js
generated
vendored
Normal file
42
backend/apis/nodejs/node_modules/pg-pool/test/bring-your-own-promise.js
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
'use strict'
|
||||
const co = require('co')
|
||||
const expect = require('expect.js')
|
||||
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
const BluebirdPromise = require('bluebird')
|
||||
|
||||
const Pool = require('../')
|
||||
|
||||
const checkType = (promise) => {
|
||||
expect(promise).to.be.a(BluebirdPromise)
|
||||
return promise.catch((e) => undefined)
|
||||
}
|
||||
|
||||
describe('Bring your own promise', function () {
|
||||
it(
|
||||
'uses supplied promise for operations',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool({ Promise: BluebirdPromise })
|
||||
const client1 = yield checkType(pool.connect())
|
||||
client1.release()
|
||||
yield checkType(pool.query('SELECT NOW()'))
|
||||
const client2 = yield checkType(pool.connect())
|
||||
// TODO - make sure pg supports BYOP as well
|
||||
client2.release()
|
||||
yield checkType(pool.end())
|
||||
})
|
||||
)
|
||||
|
||||
it(
|
||||
'uses promises in errors',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool({ Promise: BluebirdPromise, port: 48484 })
|
||||
yield checkType(pool.connect())
|
||||
yield checkType(pool.end())
|
||||
yield checkType(pool.connect())
|
||||
yield checkType(pool.query())
|
||||
yield checkType(pool.end())
|
||||
})
|
||||
)
|
||||
})
|
29
backend/apis/nodejs/node_modules/pg-pool/test/connection-strings.js
generated
vendored
Normal file
29
backend/apis/nodejs/node_modules/pg-pool/test/connection-strings.js
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
const expect = require('expect.js')
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
const Pool = require('../')
|
||||
|
||||
describe('Connection strings', function () {
|
||||
it('pool delegates connectionString property to client', function (done) {
|
||||
const connectionString = 'postgres://foo:bar@baz:1234/xur'
|
||||
|
||||
const pool = new Pool({
|
||||
// use a fake client so we can check we're passed the connectionString
|
||||
Client: function (args) {
|
||||
expect(args.connectionString).to.equal(connectionString)
|
||||
return {
|
||||
connect: function (cb) {
|
||||
cb(new Error('testing'))
|
||||
},
|
||||
on: function () {},
|
||||
}
|
||||
},
|
||||
connectionString: connectionString,
|
||||
})
|
||||
|
||||
pool.connect(function (err, client) {
|
||||
expect(err).to.not.be(undefined)
|
||||
done()
|
||||
})
|
||||
})
|
||||
})
|
229
backend/apis/nodejs/node_modules/pg-pool/test/connection-timeout.js
generated
vendored
Normal file
229
backend/apis/nodejs/node_modules/pg-pool/test/connection-timeout.js
generated
vendored
Normal file
@ -0,0 +1,229 @@
|
||||
'use strict'
|
||||
const net = require('net')
|
||||
const co = require('co')
|
||||
const expect = require('expect.js')
|
||||
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
const before = require('mocha').before
|
||||
const after = require('mocha').after
|
||||
|
||||
const Pool = require('../')
|
||||
|
||||
describe('connection timeout', () => {
|
||||
const connectionFailure = new Error('Temporary connection failure')
|
||||
|
||||
before((done) => {
|
||||
this.server = net.createServer((socket) => {
|
||||
socket.on('data', () => {
|
||||
// discard any buffered data or the server wont terminate
|
||||
})
|
||||
})
|
||||
|
||||
this.server.listen(() => {
|
||||
this.port = this.server.address().port
|
||||
done()
|
||||
})
|
||||
})
|
||||
|
||||
after((done) => {
|
||||
this.server.close(done)
|
||||
})
|
||||
|
||||
it('should callback with an error if timeout is passed', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 10, port: this.port, host: 'localhost' })
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(err.message).to.contain('timeout')
|
||||
expect(client).to.equal(undefined)
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
done()
|
||||
})
|
||||
})
|
||||
|
||||
it('should reject promise with an error if timeout is passed', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 10, port: this.port, host: 'localhost' })
|
||||
pool.connect().catch((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(err.message).to.contain('timeout')
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
done()
|
||||
})
|
||||
})
|
||||
|
||||
it(
|
||||
'should handle multiple timeouts',
|
||||
co.wrap(
|
||||
function* () {
|
||||
const errors = []
|
||||
const pool = new Pool({ connectionTimeoutMillis: 1, port: this.port, host: 'localhost' })
|
||||
for (var i = 0; i < 15; i++) {
|
||||
try {
|
||||
yield pool.connect()
|
||||
} catch (e) {
|
||||
errors.push(e)
|
||||
}
|
||||
}
|
||||
expect(errors).to.have.length(15)
|
||||
}.bind(this)
|
||||
)
|
||||
)
|
||||
|
||||
it('should timeout on checkout of used connection', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 100, max: 1 })
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(client).to.not.be(undefined)
|
||||
pool.connect((err, client) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(client).to.be(undefined)
|
||||
release()
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('should not break further pending checkouts on a timeout', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 200, max: 1 })
|
||||
pool.connect((err, client, releaseOuter) => {
|
||||
expect(err).to.be(undefined)
|
||||
|
||||
pool.connect((err, client) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(client).to.be(undefined)
|
||||
releaseOuter()
|
||||
})
|
||||
|
||||
setTimeout(() => {
|
||||
pool.connect((err, client, releaseInner) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(client).to.not.be(undefined)
|
||||
releaseInner()
|
||||
pool.end(done)
|
||||
})
|
||||
}, 100)
|
||||
})
|
||||
})
|
||||
|
||||
it('should timeout on query if all clients are busy', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 100, max: 1 })
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(client).to.not.be(undefined)
|
||||
pool.query('select now()', (err, result) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(result).to.be(undefined)
|
||||
release()
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('should recover from timeout errors', (done) => {
|
||||
const pool = new Pool({ connectionTimeoutMillis: 100, max: 1 })
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(client).to.not.be(undefined)
|
||||
pool.query('select now()', (err, result) => {
|
||||
expect(err).to.be.an(Error)
|
||||
expect(result).to.be(undefined)
|
||||
release()
|
||||
pool.query('select $1::text as name', ['brianc'], (err, res) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(res.rows).to.have.length(1)
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('continues processing after a connection failure', (done) => {
|
||||
const Client = require('pg').Client
|
||||
const orgConnect = Client.prototype.connect
|
||||
let called = false
|
||||
|
||||
Client.prototype.connect = function (cb) {
|
||||
// Simulate a failure on first call
|
||||
if (!called) {
|
||||
called = true
|
||||
|
||||
return setTimeout(() => {
|
||||
cb(connectionFailure)
|
||||
}, 100)
|
||||
}
|
||||
// And pass-through the second call
|
||||
orgConnect.call(this, cb)
|
||||
}
|
||||
|
||||
const pool = new Pool({
|
||||
Client: Client,
|
||||
connectionTimeoutMillis: 1000,
|
||||
max: 1,
|
||||
})
|
||||
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be(connectionFailure)
|
||||
|
||||
pool.query('select $1::text as name', ['brianc'], (err, res) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(res.rows).to.have.length(1)
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('releases newly connected clients if the queued already timed out', (done) => {
|
||||
const Client = require('pg').Client
|
||||
|
||||
const orgConnect = Client.prototype.connect
|
||||
|
||||
let connection = 0
|
||||
|
||||
Client.prototype.connect = function (cb) {
|
||||
// Simulate a failure on first call
|
||||
if (connection === 0) {
|
||||
connection++
|
||||
|
||||
return setTimeout(() => {
|
||||
cb(connectionFailure)
|
||||
}, 300)
|
||||
}
|
||||
|
||||
// And second connect taking > connection timeout
|
||||
if (connection === 1) {
|
||||
connection++
|
||||
|
||||
return setTimeout(() => {
|
||||
orgConnect.call(this, cb)
|
||||
}, 1000)
|
||||
}
|
||||
|
||||
orgConnect.call(this, cb)
|
||||
}
|
||||
|
||||
const pool = new Pool({
|
||||
Client: Client,
|
||||
connectionTimeoutMillis: 1000,
|
||||
max: 1,
|
||||
})
|
||||
|
||||
// Direct connect
|
||||
pool.connect((err, client, release) => {
|
||||
expect(err).to.be(connectionFailure)
|
||||
})
|
||||
|
||||
// Queued
|
||||
let called = 0
|
||||
pool.connect((err, client, release) => {
|
||||
// Verify the callback is only called once
|
||||
expect(called++).to.be(0)
|
||||
expect(err).to.be.an(Error)
|
||||
|
||||
pool.query('select $1::text as name', ['brianc'], (err, res) => {
|
||||
expect(err).to.be(undefined)
|
||||
expect(res.rows).to.have.length(1)
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
40
backend/apis/nodejs/node_modules/pg-pool/test/ending.js
generated
vendored
Normal file
40
backend/apis/nodejs/node_modules/pg-pool/test/ending.js
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
'use strict'
|
||||
const co = require('co')
|
||||
const expect = require('expect.js')
|
||||
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
|
||||
const Pool = require('../')
|
||||
|
||||
describe('pool ending', () => {
|
||||
it('ends without being used', (done) => {
|
||||
const pool = new Pool()
|
||||
pool.end(done)
|
||||
})
|
||||
|
||||
it('ends with a promise', () => {
|
||||
return new Pool().end()
|
||||
})
|
||||
|
||||
it(
|
||||
'ends with clients',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool()
|
||||
const res = yield pool.query('SELECT $1::text as name', ['brianc'])
|
||||
expect(res.rows[0].name).to.equal('brianc')
|
||||
return pool.end()
|
||||
})
|
||||
)
|
||||
|
||||
it(
|
||||
'allows client to finish',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool()
|
||||
const query = pool.query('SELECT $1::text as name', ['brianc'])
|
||||
yield pool.end()
|
||||
const res = yield query
|
||||
expect(res.rows[0].name).to.equal('brianc')
|
||||
})
|
||||
)
|
||||
})
|
260
backend/apis/nodejs/node_modules/pg-pool/test/error-handling.js
generated
vendored
Normal file
260
backend/apis/nodejs/node_modules/pg-pool/test/error-handling.js
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
||||
'use strict'
|
||||
const net = require('net')
|
||||
const co = require('co')
|
||||
const expect = require('expect.js')
|
||||
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
|
||||
const Pool = require('../')
|
||||
|
||||
describe('pool error handling', function () {
|
||||
it('Should complete these queries without dying', function (done) {
|
||||
const pool = new Pool()
|
||||
let errors = 0
|
||||
let shouldGet = 0
|
||||
function runErrorQuery() {
|
||||
shouldGet++
|
||||
return new Promise(function (resolve, reject) {
|
||||
pool
|
||||
.query("SELECT 'asd'+1 ")
|
||||
.then(function (res) {
|
||||
reject(res) // this should always error
|
||||
})
|
||||
.catch(function (err) {
|
||||
errors++
|
||||
resolve(err)
|
||||
})
|
||||
})
|
||||
}
|
||||
const ps = []
|
||||
for (let i = 0; i < 5; i++) {
|
||||
ps.push(runErrorQuery())
|
||||
}
|
||||
Promise.all(ps).then(function () {
|
||||
expect(shouldGet).to.eql(errors)
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
|
||||
it('Catches errors in client.query', async function () {
|
||||
let caught = false
|
||||
const pool = new Pool()
|
||||
try {
|
||||
await pool.query(null)
|
||||
} catch (e) {
|
||||
caught = true
|
||||
}
|
||||
pool.end()
|
||||
expect(caught).to.be(true)
|
||||
})
|
||||
|
||||
describe('calling release more than once', () => {
|
||||
it(
|
||||
'should throw each time',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool()
|
||||
const client = yield pool.connect()
|
||||
client.release()
|
||||
expect(() => client.release()).to.throwError()
|
||||
expect(() => client.release()).to.throwError()
|
||||
return yield pool.end()
|
||||
})
|
||||
)
|
||||
|
||||
it('should throw each time with callbacks', function (done) {
|
||||
const pool = new Pool()
|
||||
|
||||
pool.connect(function (err, client, clientDone) {
|
||||
expect(err).not.to.be.an(Error)
|
||||
clientDone()
|
||||
|
||||
expect(() => clientDone()).to.throwError()
|
||||
expect(() => clientDone()).to.throwError()
|
||||
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('using an ended pool', () => {
|
||||
it('rejects all additional promises', (done) => {
|
||||
const pool = new Pool()
|
||||
const promises = []
|
||||
pool.end().then(() => {
|
||||
const squash = (promise) => promise.catch((e) => 'okay!')
|
||||
promises.push(squash(pool.connect()))
|
||||
promises.push(squash(pool.query('SELECT NOW()')))
|
||||
promises.push(squash(pool.end()))
|
||||
Promise.all(promises).then((res) => {
|
||||
expect(res).to.eql(['okay!', 'okay!', 'okay!'])
|
||||
done()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('returns an error on all additional callbacks', (done) => {
|
||||
const pool = new Pool()
|
||||
pool.end(() => {
|
||||
pool.query('SELECT *', (err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
pool.connect((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
pool.end((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
done()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('error from idle client', () => {
|
||||
it(
|
||||
'removes client from pool',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool()
|
||||
const client = yield pool.connect()
|
||||
expect(pool.totalCount).to.equal(1)
|
||||
expect(pool.waitingCount).to.equal(0)
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
client.release()
|
||||
yield new Promise((resolve, reject) => {
|
||||
process.nextTick(() => {
|
||||
let poolError
|
||||
pool.once('error', (err) => {
|
||||
poolError = err
|
||||
})
|
||||
|
||||
let clientError
|
||||
client.once('error', (err) => {
|
||||
clientError = err
|
||||
})
|
||||
|
||||
client.emit('error', new Error('expected'))
|
||||
|
||||
expect(clientError.message).to.equal('expected')
|
||||
expect(poolError.message).to.equal('expected')
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
expect(pool.totalCount).to.equal(0)
|
||||
pool.end().then(resolve, reject)
|
||||
})
|
||||
})
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
describe('error from in-use client', () => {
|
||||
it(
|
||||
'keeps the client in the pool',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool()
|
||||
const client = yield pool.connect()
|
||||
expect(pool.totalCount).to.equal(1)
|
||||
expect(pool.waitingCount).to.equal(0)
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
|
||||
yield new Promise((resolve, reject) => {
|
||||
process.nextTick(() => {
|
||||
let poolError
|
||||
pool.once('error', (err) => {
|
||||
poolError = err
|
||||
})
|
||||
|
||||
let clientError
|
||||
client.once('error', (err) => {
|
||||
clientError = err
|
||||
})
|
||||
|
||||
client.emit('error', new Error('expected'))
|
||||
|
||||
expect(clientError.message).to.equal('expected')
|
||||
expect(poolError).not.to.be.ok()
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
expect(pool.totalCount).to.equal(1)
|
||||
client.release()
|
||||
pool.end().then(resolve, reject)
|
||||
})
|
||||
})
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
describe('passing a function to pool.query', () => {
|
||||
it('calls back with error', (done) => {
|
||||
const pool = new Pool()
|
||||
console.log('passing fn to query')
|
||||
pool.query((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('pool with lots of errors', () => {
|
||||
it(
|
||||
'continues to work and provide new clients',
|
||||
co.wrap(function* () {
|
||||
const pool = new Pool({ max: 1 })
|
||||
const errors = []
|
||||
for (var i = 0; i < 20; i++) {
|
||||
try {
|
||||
yield pool.query('invalid sql')
|
||||
} catch (err) {
|
||||
errors.push(err)
|
||||
}
|
||||
}
|
||||
expect(errors).to.have.length(20)
|
||||
expect(pool.idleCount).to.equal(0)
|
||||
expect(pool.query).to.be.a(Function)
|
||||
const res = yield pool.query('SELECT $1::text as name', ['brianc'])
|
||||
expect(res.rows).to.have.length(1)
|
||||
expect(res.rows[0].name).to.equal('brianc')
|
||||
return pool.end()
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
it('should continue with queued items after a connection failure', (done) => {
|
||||
const closeServer = net
|
||||
.createServer((socket) => {
|
||||
socket.destroy()
|
||||
})
|
||||
.unref()
|
||||
|
||||
closeServer.listen(() => {
|
||||
const pool = new Pool({ max: 1, port: closeServer.address().port, host: 'localhost' })
|
||||
pool.connect((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
if (err.code) {
|
||||
expect(err.code).to.be('ECONNRESET')
|
||||
}
|
||||
})
|
||||
pool.connect((err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
if (err.code) {
|
||||
expect(err.code).to.be('ECONNRESET')
|
||||
}
|
||||
closeServer.close(() => {
|
||||
pool.end(done)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('handles post-checkout client failures in pool.query', (done) => {
|
||||
const pool = new Pool({ max: 1 })
|
||||
pool.on('error', () => {
|
||||
// We double close the connection in this test, prevent exception caused by that
|
||||
})
|
||||
pool.query('SELECT pg_sleep(5)', [], (err) => {
|
||||
expect(err).to.be.an(Error)
|
||||
done()
|
||||
})
|
||||
|
||||
setTimeout(() => {
|
||||
pool._clients[0].end()
|
||||
}, 1000)
|
||||
})
|
||||
})
|
124
backend/apis/nodejs/node_modules/pg-pool/test/events.js
generated
vendored
Normal file
124
backend/apis/nodejs/node_modules/pg-pool/test/events.js
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
'use strict'
|
||||
|
||||
const expect = require('expect.js')
|
||||
const EventEmitter = require('events').EventEmitter
|
||||
const describe = require('mocha').describe
|
||||
const it = require('mocha').it
|
||||
const Pool = require('../')
|
||||
|
||||
describe('events', function () {
|
||||
it('emits connect before callback', function (done) {
|
||||
const pool = new Pool()
|
||||
let emittedClient = false
|
||||
pool.on('connect', function (client) {
|
||||
emittedClient = client
|
||||
})
|
||||
|
||||
pool.connect(function (err, client, release) {
|
||||
if (err) return done(err)
|
||||
release()
|
||||
pool.end()
|
||||
expect(client).to.be(emittedClient)
|
||||
done()
|
||||
})
|
||||
})
|
||||
|
||||
it('emits "connect" only with a successful connection', function () {
|
||||
const pool = new Pool({
|
||||
// This client will always fail to connect
|
||||
Client: mockClient({
|
||||
connect: function (cb) {
|
||||
process.nextTick(() => {
|
||||
cb(new Error('bad news'))
|
||||
})
|
||||
},
|
||||
}),
|
||||
})
|
||||
pool.on('connect', function () {
|
||||
throw new Error('should never get here')
|
||||
})
|
||||
return pool.connect().catch((e) => expect(e.message).to.equal('bad news'))
|
||||
})
|
||||
|
||||
it('emits acquire every time a client is acquired', function (done) {
|
||||
const pool = new Pool()
|
||||
let acquireCount = 0
|
||||
pool.on('acquire', function (client) {
|
||||
expect(client).to.be.ok()
|
||||
acquireCount++
|
||||
})
|
||||
for (let i = 0; i < 10; i++) {
|
||||
pool.connect(function (err, client, release) {
|
||||
if (err) return done(err)
|
||||
release()
|
||||
})
|
||||
pool.query('SELECT now()')
|
||||
}
|
||||
setTimeout(function () {
|
||||
expect(acquireCount).to.be(20)
|
||||
pool.end(done)
|
||||
}, 100)
|
||||
})
|
||||
|
||||
it('emits release every time a client is released', function (done) {
|
||||
const pool = new Pool()
|
||||
let releaseCount = 0
|
||||
pool.on('release', function (err, client) {
|
||||
expect(err instanceof Error).not.to.be(true)
|
||||
expect(client).to.be.ok()
|
||||
releaseCount++
|
||||
})
|
||||
const promises = []
|
||||
for (let i = 0; i < 10; i++) {
|
||||
pool.connect(function (err, client, release) {
|
||||
if (err) return done(err)
|
||||
release()
|
||||
})
|
||||
promises.push(pool.query('SELECT now()'))
|
||||
}
|
||||
Promise.all(promises).then(() => {
|
||||
pool.end(() => {
|
||||
expect(releaseCount).to.be(20)
|
||||
done()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('emits release with an error if client is released due to an error', function (done) {
|
||||
const pool = new Pool()
|
||||
pool.connect(function (err, client, release) {
|
||||
expect(err).to.equal(undefined)
|
||||
const releaseError = new Error('problem')
|
||||
pool.once('release', function (err, errClient) {
|
||||
expect(err).to.equal(releaseError)
|
||||
expect(errClient).to.equal(client)
|
||||
pool.end(done)
|
||||
})
|
||||
release(releaseError)
|
||||
})
|
||||
})
|
||||
|
||||
it('emits error and client if an idle client in the pool hits an error', function (done) {
|
||||
const pool = new Pool()
|
||||
pool.connect(function (err, client) {
|
||||
expect(err).to.equal(undefined)
|
||||
client.release()
|
||||
setImmediate(function () {
|
||||
client.emit('error', new Error('problem'))
|
||||
})
|
||||
pool.once('error', function (err, errClient) {
|
||||
expect(err.message).to.equal('problem')
|
||||
expect(errClient).to.equal(client)
|
||||
done()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
function mockClient(methods) {
|
||||
return function () {
|
||||
const client = new EventEmitter()
|
||||
Object.assign(client, methods)
|
||||
return client
|
||||
}
|
||||
}
|
20
backend/apis/nodejs/node_modules/pg-pool/test/idle-timeout-exit.js
generated
vendored
Normal file
20
backend/apis/nodejs/node_modules/pg-pool/test/idle-timeout-exit.js
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// This test is meant to be spawned from idle-timeout.js
|
||||
if (module === require.main) {
|
||||
const allowExitOnIdle = process.env.ALLOW_EXIT_ON_IDLE === '1'
|
||||
const Pool = require('../index')
|
||||
|
||||
const pool = new Pool({
|
||||
maxLifetimeSeconds: 2,
|
||||
idleTimeoutMillis: 200,
|
||||
...(allowExitOnIdle ? { allowExitOnIdle: true } : {}),
|
||||
})
|
||||
pool.query('SELECT NOW()', (err, res) => console.log('completed first'))
|
||||
pool.on('remove', () => {
|
||||
console.log('removed')
|
||||
done()
|
||||
})
|
||||
|
||||
setTimeout(() => {
|
||||
pool.query('SELECT * from generate_series(0, 1000)', (err, res) => console.log('completed second'))
|
||||
}, 50)
|
||||
}
|
118
backend/apis/nodejs/node_modules/pg-pool/test/idle-timeout.js
generated
vendored
Normal file
@ -0,0 +1,118 @@
'use strict'
const co = require('co')
const expect = require('expect.js')

const describe = require('mocha').describe
const it = require('mocha').it
const { fork } = require('child_process')
const path = require('path')

const Pool = require('../')

const wait = (time) => new Promise((resolve) => setTimeout(resolve, time))

describe('idle timeout', () => {
  it('should timeout and remove the client', (done) => {
    const pool = new Pool({ idleTimeoutMillis: 10 })
    pool.query('SELECT NOW()')
    pool.on('remove', () => {
      expect(pool.idleCount).to.equal(0)
      expect(pool.totalCount).to.equal(0)
      done()
    })
  })

  it(
    'times out and removes clients when others are also removed',
    co.wrap(function* () {
      const pool = new Pool({ idleTimeoutMillis: 10 })
      const clientA = yield pool.connect()
      const clientB = yield pool.connect()
      clientA.release()
      clientB.release(new Error())

      const removal = new Promise((resolve) => {
        pool.on('remove', () => {
          expect(pool.idleCount).to.equal(0)
          expect(pool.totalCount).to.equal(0)
          resolve()
        })
      })

      const timeout = wait(100).then(() => Promise.reject(new Error('Idle timeout failed to occur')))

      try {
        yield Promise.race([removal, timeout])
      } finally {
        pool.end()
      }
    })
  )

  it(
    'can remove idle clients and recreate them',
    co.wrap(function* () {
      const pool = new Pool({ idleTimeoutMillis: 1 })
      const results = []
      for (var i = 0; i < 20; i++) {
        let query = pool.query('SELECT NOW()')
        expect(pool.idleCount).to.equal(0)
        expect(pool.totalCount).to.equal(1)
        results.push(yield query)
        yield wait(2)
        expect(pool.idleCount).to.equal(0)
        expect(pool.totalCount).to.equal(0)
      }
      expect(results).to.have.length(20)
    })
  )

  it(
    'does not time out clients which are used',
    co.wrap(function* () {
      const pool = new Pool({ idleTimeoutMillis: 1 })
      const results = []
      for (var i = 0; i < 20; i++) {
        let client = yield pool.connect()
        expect(pool.totalCount).to.equal(1)
        expect(pool.idleCount).to.equal(0)
        yield wait(10)
        results.push(yield client.query('SELECT NOW()'))
        client.release()
        expect(pool.idleCount).to.equal(1)
        expect(pool.totalCount).to.equal(1)
      }
      expect(results).to.have.length(20)
      return pool.end()
    })
  )

  it('unrefs the connections and timeouts so the program can exit when idle when the allowExitOnIdle option is set', function (done) {
    const child = fork(path.join(__dirname, 'idle-timeout-exit.js'), [], {
      silent: true,
      env: { ...process.env, ALLOW_EXIT_ON_IDLE: '1' },
    })
    let result = ''
    child.stdout.setEncoding('utf8')
    child.stdout.on('data', (chunk) => (result += chunk))
    child.on('error', (err) => done(err))
    child.on('close', () => {
      expect(result).to.equal('completed first\ncompleted second\n')
      done()
    })
  })

  it('keeps old behavior when allowExitOnIdle option is not set', function (done) {
    const child = fork(path.join(__dirname, 'idle-timeout-exit.js'), [], {
      silent: true,
    })
    let result = ''
    child.stdout.setEncoding('utf8')
    child.stdout.on('data', (chunk) => (result += chunk))
    child.on('error', (err) => done(err))
    child.on('close', () => {
      expect(result).to.equal('completed first\ncompleted second\nremoved\n')
      done()
    })
  })
})
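As a rough usage sketch (assuming a reachable database configured through environment variables), the two options these tests exercise can be combined so that a short-lived script neither keeps stale connections nor hangs on exit:

```js
const Pool = require('pg-pool')

// Close clients idle for 30 seconds; with allowExitOnIdle the pool unrefs its
// sockets and timers, so a one-shot script can exit without calling pool.end().
const pool = new Pool({
  idleTimeoutMillis: 30000,
  allowExitOnIdle: true,
})

pool.query('SELECT NOW()').then((res) => console.log(res.rows[0]))
```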
226
backend/apis/nodejs/node_modules/pg-pool/test/index.js
generated
vendored
Normal file
@ -0,0 +1,226 @@
'use strict'
const expect = require('expect.js')
const _ = require('lodash')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('pool', function () {
  describe('with callbacks', function () {
    it('works totally unconfigured', function (done) {
      const pool = new Pool()
      pool.connect(function (err, client, release) {
        if (err) return done(err)
        client.query('SELECT NOW()', function (err, res) {
          release()
          if (err) return done(err)
          expect(res.rows).to.have.length(1)
          pool.end(done)
        })
      })
    })

    it('passes props to clients', function (done) {
      const pool = new Pool({ binary: true })
      pool.connect(function (err, client, release) {
        release()
        if (err) return done(err)
        expect(client.binary).to.eql(true)
        pool.end(done)
      })
    })

    it('can run a query with a callback without parameters', function (done) {
      const pool = new Pool()
      pool.query('SELECT 1 as num', function (err, res) {
        expect(res.rows[0]).to.eql({ num: 1 })
        pool.end(function () {
          done(err)
        })
      })
    })

    it('can run a query with a callback', function (done) {
      const pool = new Pool()
      pool.query('SELECT $1::text as name', ['brianc'], function (err, res) {
        expect(res.rows[0]).to.eql({ name: 'brianc' })
        pool.end(function () {
          done(err)
        })
      })
    })

    it('passes connection errors to callback', function (done) {
      const pool = new Pool({ port: 53922 })
      pool.query('SELECT $1::text as name', ['brianc'], function (err, res) {
        expect(res).to.be(undefined)
        expect(err).to.be.an(Error)
        // a connection error should not pollute the pool with a dead client
        expect(pool.totalCount).to.equal(0)
        pool.end(function (err) {
          done(err)
        })
      })
    })

    it('does not pass client to error callback', function (done) {
      const pool = new Pool({ port: 58242 })
      pool.connect(function (err, client, release) {
        expect(err).to.be.an(Error)
        expect(client).to.be(undefined)
        expect(release).to.be.a(Function)
        pool.end(done)
      })
    })

    it('removes client if it errors in background', function (done) {
      const pool = new Pool()
      pool.connect(function (err, client, release) {
        release()
        if (err) return done(err)
        client.testString = 'foo'
        setTimeout(function () {
          client.emit('error', new Error('on purpose'))
        }, 10)
      })
      pool.on('error', function (err) {
        expect(err.message).to.be('on purpose')
        expect(err.client).to.not.be(undefined)
        expect(err.client.testString).to.be('foo')
        err.client.connection.stream.on('end', function () {
          pool.end(done)
        })
      })
    })

    it('should not change given options', function (done) {
      const options = { max: 10 }
      const pool = new Pool(options)
      pool.connect(function (err, client, release) {
        release()
        if (err) return done(err)
        expect(options).to.eql({ max: 10 })
        pool.end(done)
      })
    })

    it('does not create promises when connecting', function (done) {
      const pool = new Pool()
      const returnValue = pool.connect(function (err, client, release) {
        release()
        if (err) return done(err)
        pool.end(done)
      })
      expect(returnValue).to.be(undefined)
    })

    it('does not create promises when querying', function (done) {
      const pool = new Pool()
      const returnValue = pool.query('SELECT 1 as num', function (err) {
        pool.end(function () {
          done(err)
        })
      })
      expect(returnValue).to.be(undefined)
    })

    it('does not create promises when ending', function (done) {
      const pool = new Pool()
      const returnValue = pool.end(done)
      expect(returnValue).to.be(undefined)
    })

    it('never calls callback synchronously', function (done) {
      const pool = new Pool()
      pool.connect((err, client) => {
        if (err) throw err
        client.release()
        setImmediate(() => {
          let called = false
          pool.connect((err, client) => {
            if (err) throw err
            called = true
            client.release()
            setImmediate(() => {
              pool.end(done)
            })
          })
          expect(called).to.equal(false)
        })
      })
    })
  })

  describe('with promises', function () {
    it('connects, queries, and disconnects', function () {
      const pool = new Pool()
      return pool.connect().then(function (client) {
        return client.query('select $1::text as name', ['hi']).then(function (res) {
          expect(res.rows).to.eql([{ name: 'hi' }])
          client.release()
          return pool.end()
        })
      })
    })

    it('executes a query directly', () => {
      const pool = new Pool()
      return pool.query('SELECT $1::text as name', ['hi']).then((res) => {
        expect(res.rows).to.have.length(1)
        expect(res.rows[0].name).to.equal('hi')
        return pool.end()
      })
    })

    it('properly pools clients', function () {
      const pool = new Pool({ poolSize: 9 })
      const promises = _.times(30, function () {
        return pool.connect().then(function (client) {
          return client.query('select $1::text as name', ['hi']).then(function (res) {
            client.release()
            return res
          })
        })
      })
      return Promise.all(promises).then(function (res) {
        expect(res).to.have.length(30)
        expect(pool.totalCount).to.be(9)
        return pool.end()
      })
    })

    it('supports just running queries', function () {
      const pool = new Pool({ poolSize: 9 })
      const text = 'select $1::text as name'
      const values = ['hi']
      const query = { text: text, values: values }
      const promises = _.times(30, () => pool.query(query))
      return Promise.all(promises).then(function (queries) {
        expect(queries).to.have.length(30)
        return pool.end()
      })
    })

    it('recovers from query errors', function () {
      const pool = new Pool()

      const errors = []
      const promises = _.times(30, () => {
        return pool.query('SELECT asldkfjasldkf').catch(function (e) {
          errors.push(e)
        })
      })
      return Promise.all(promises).then(() => {
        expect(errors).to.have.length(30)
        expect(pool.totalCount).to.equal(0)
        expect(pool.idleCount).to.equal(0)
        return pool.query('SELECT $1::text as name', ['hi']).then(function (res) {
          expect(res.rows).to.eql([{ name: 'hi' }])
          return pool.end()
        })
      })
    })
  })
})
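One property these tests rely on is worth a small illustration (a sketch, assuming a reachable database): a failed query does not leave a dead client in the pool, so the counters fall back to zero and later queries get a fresh connection.

```js
const Pool = require('pg-pool')

const pool = new Pool()

pool
  .query('SELECT column_that_does_not_exist')
  .catch((err) => console.error('query failed:', err.message))
  .then(() => {
    // the failing client was discarded rather than returned to the pool
    console.log('total:', pool.totalCount, 'idle:', pool.idleCount)
    return pool.end()
  })
```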
47
backend/apis/nodejs/node_modules/pg-pool/test/lifetime-timeout.js
generated
vendored
Normal file
@ -0,0 +1,47 @@
'use strict'
const co = require('co')
const expect = require('expect.js')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('lifetime timeout', () => {
  it('connection lifetime should expire and remove the client', (done) => {
    const pool = new Pool({ maxLifetimeSeconds: 1 })
    pool.query('SELECT NOW()')
    pool.on('remove', () => {
      console.log('expired while idle - on-remove event')
      expect(pool.expiredCount).to.equal(0)
      expect(pool.totalCount).to.equal(0)
      done()
    })
  })
  it('connection lifetime should expire and remove the client after the client is done working', (done) => {
    const pool = new Pool({ maxLifetimeSeconds: 1 })
    pool.query('SELECT pg_sleep(1.4)')
    pool.on('remove', () => {
      console.log('expired while busy - on-remove event')
      expect(pool.expiredCount).to.equal(0)
      expect(pool.totalCount).to.equal(0)
      done()
    })
  })
  it(
    'can remove expired clients and recreate them',
    co.wrap(function* () {
      const pool = new Pool({ maxLifetimeSeconds: 1 })
      let query = pool.query('SELECT pg_sleep(1.4)')
      expect(pool.expiredCount).to.equal(0)
      expect(pool.totalCount).to.equal(1)
      yield query
      yield new Promise((resolve) => setTimeout(resolve, 100))
      expect(pool.expiredCount).to.equal(0)
      expect(pool.totalCount).to.equal(0)
      yield pool.query('SELECT NOW()')
      expect(pool.expiredCount).to.equal(0)
      expect(pool.totalCount).to.equal(1)
    })
  )
})
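A minimal configuration sketch for the option under test (the one-hour value is illustrative, not a recommendation):

```js
const Pool = require('pg-pool')

// Recycle every connection roughly one hour after it was created; a client that
// is busy when its lifetime expires is removed once it finishes its current work.
// Handy behind load balancers or when database credentials rotate.
const pool = new Pool({ maxLifetimeSeconds: 3600 })

pool.on('remove', () => {
  console.log('expired client removed, total now:', pool.totalCount)
})
```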
20
backend/apis/nodejs/node_modules/pg-pool/test/logging.js
generated
vendored
Normal file
@ -0,0 +1,20 @@
const expect = require('expect.js')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('logging', function () {
  it('logs to supplied log function if given', function () {
    const messages = []
    const log = function (msg) {
      messages.push(msg)
    }
    const pool = new Pool({ log: log })
    return pool.query('SELECT NOW()').then(function () {
      expect(messages.length).to.be.greaterThan(0)
      return pool.end()
    })
  })
})
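A small sketch of wiring the `log` option into an application logger; the messages are internal pool diagnostics, and their exact wording is not part of the public API:

```js
const Pool = require('pg-pool')

// The pool passes its internal diagnostic messages to the supplied function.
const pool = new Pool({
  log: (...messages) => console.debug('[pg-pool]', ...messages),
})

pool.query('SELECT 1').then(() => pool.end())
```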
97
backend/apis/nodejs/node_modules/pg-pool/test/max-uses.js
generated
vendored
Normal file
@ -0,0 +1,97 @@
const expect = require('expect.js')
const co = require('co')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('maxUses', () => {
  it(
    'can create a single client and use it once',
    co.wrap(function* () {
      const pool = new Pool({ maxUses: 2 })
      expect(pool.waitingCount).to.equal(0)
      const client = yield pool.connect()
      const res = yield client.query('SELECT $1::text as name', ['hi'])
      expect(res.rows[0].name).to.equal('hi')
      client.release()
      pool.end()
    })
  )

  it(
    'getting a connection a second time returns the same connection and releasing it also closes it',
    co.wrap(function* () {
      const pool = new Pool({ maxUses: 2 })
      expect(pool.waitingCount).to.equal(0)
      const client = yield pool.connect()
      client.release()
      const client2 = yield pool.connect()
      expect(client).to.equal(client2)
      expect(client2._ending).to.equal(false)
      client2.release()
      expect(client2._ending).to.equal(true)
      return yield pool.end()
    })
  )

  it(
    'getting a connection a third time returns a new connection',
    co.wrap(function* () {
      const pool = new Pool({ maxUses: 2 })
      expect(pool.waitingCount).to.equal(0)
      const client = yield pool.connect()
      client.release()
      const client2 = yield pool.connect()
      expect(client).to.equal(client2)
      client2.release()
      const client3 = yield pool.connect()
      expect(client3).not.to.equal(client2)
      client3.release()
      return yield pool.end()
    })
  )

  it(
    'getting a connection from a pending request gets a fresh client when the released candidate is expended',
    co.wrap(function* () {
      const pool = new Pool({ max: 1, maxUses: 2 })
      expect(pool.waitingCount).to.equal(0)
      const client1 = yield pool.connect()
      pool.connect().then((client2) => {
        expect(client2).to.equal(client1)
        expect(pool.waitingCount).to.equal(1)
        // Releasing the client this time should also expend it since maxUses is 2, causing client3 to be a fresh client
        client2.release()
      })
      const client3Promise = pool.connect().then((client3) => {
        // client3 should be a fresh client since client2's release caused the first client to be expended
        expect(pool.waitingCount).to.equal(0)
        expect(client3).not.to.equal(client1)
        return client3.release()
      })
      // There should be two pending requests since we have 3 connect requests but a max size of 1
      expect(pool.waitingCount).to.equal(2)
      // Releasing the client should not yet expend it since maxUses is 2
      client1.release()
      yield client3Promise
      return yield pool.end()
    })
  )

  it(
    'logs when removing an expended client',
    co.wrap(function* () {
      const messages = []
      const log = function (msg) {
        messages.push(msg)
      }
      const pool = new Pool({ maxUses: 1, log })
      const client = yield pool.connect()
      client.release()
      expect(messages).to.contain('remove expended client')
      return yield pool.end()
    })
  )
})
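A brief sketch of the option in application code; the checkout limit of 500 is an arbitrary illustration:

```js
const Pool = require('pg-pool')

// Retire a connection after 500 checkouts; the pool opens a replacement on demand.
const pool = new Pool({ max: 10, maxUses: 500 })

pool.on('remove', () => {
  console.log('expended client removed, total now:', pool.totalCount)
})
```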
53
backend/apis/nodejs/node_modules/pg-pool/test/releasing-clients.js
generated
vendored
Normal file
@ -0,0 +1,53 @@
const Pool = require('../')

const expect = require('expect.js')

describe('releasing clients', () => {
  it('removes a client which cannot be queried', async () => {
    // make a pool w/ only 1 client
    const pool = new Pool({ max: 1 })
    expect(pool.totalCount).to.eql(0)
    const client = await pool.connect()
    expect(pool.totalCount).to.eql(1)
    expect(pool.idleCount).to.eql(0)
    // reach into the client and sever its connection
    client.connection.end()

    // wait for the client to error out
    const err = await new Promise((resolve) => client.once('error', resolve))
    expect(err).to.be.ok()
    expect(pool.totalCount).to.eql(1)
    expect(pool.idleCount).to.eql(0)

    // try to return it to the pool - this removes it because it's broken
    client.release()
    expect(pool.totalCount).to.eql(0)
    expect(pool.idleCount).to.eql(0)

    // make sure pool still works
    const { rows } = await pool.query('SELECT NOW()')
    expect(rows).to.have.length(1)
    await pool.end()
  })

  it('removes a client which is ending', async () => {
    // make a pool w/ only 1 client
    const pool = new Pool({ max: 1 })
    expect(pool.totalCount).to.eql(0)
    const client = await pool.connect()
    expect(pool.totalCount).to.eql(1)
    expect(pool.idleCount).to.eql(0)
    // end the client gracefully (but you shouldn't do this with pooled clients)
    client.end()

    // try to return it to the pool
    client.release()
    expect(pool.totalCount).to.eql(0)
    expect(pool.idleCount).to.eql(0)

    // make sure pool still works
    const { rows } = await pool.query('SELECT NOW()')
    expect(rows).to.have.length(1)
    await pool.end()
  })
})
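The discipline these tests encode, sketched as a small helper (`withClient` is a hypothetical wrapper, not part of pg-pool's API): release a checked-out client exactly once, and pass the error to `release` when the client should be destroyed rather than reused.

```js
const Pool = require('pg-pool')

const pool = new Pool()

// withClient is a hypothetical convenience wrapper around connect/release.
async function withClient(work) {
  const client = await pool.connect()
  try {
    const result = await work(client)
    client.release() // healthy client goes back to the pool
    return result
  } catch (err) {
    client.release(err) // an error argument destroys the client instead of pooling it
    throw err
  }
}
```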
10
backend/apis/nodejs/node_modules/pg-pool/test/setup.js
generated
vendored
Normal file
@ -0,0 +1,10 @@
const crash = (reason) => {
  process.on(reason, (err) => {
    console.error(reason, err.stack)
    process.exit(-1)
  })
}

crash('unhandledRejection')
crash('uncaughtException')
crash('warning')
58
backend/apis/nodejs/node_modules/pg-pool/test/sizing.js
generated
vendored
Normal file
@ -0,0 +1,58 @@
const expect = require('expect.js')
const co = require('co')
const _ = require('lodash')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('pool size of 1', () => {
  it(
    'can create a single client and use it once',
    co.wrap(function* () {
      const pool = new Pool({ max: 1 })
      expect(pool.waitingCount).to.equal(0)
      const client = yield pool.connect()
      const res = yield client.query('SELECT $1::text as name', ['hi'])
      expect(res.rows[0].name).to.equal('hi')
      client.release()
      pool.end()
    })
  )

  it(
    'can create a single client and use it multiple times',
    co.wrap(function* () {
      const pool = new Pool({ max: 1 })
      expect(pool.waitingCount).to.equal(0)
      const client = yield pool.connect()
      const wait = pool.connect()
      expect(pool.waitingCount).to.equal(1)
      client.release()
      const client2 = yield wait
      expect(client).to.equal(client2)
      client2.release()
      return yield pool.end()
    })
  )

  it(
    'can only send 1 query at a time',
    co.wrap(function* () {
      const pool = new Pool({ max: 1 })

      // the query text column name changed in PostgreSQL 9.2
      const versionResult = yield pool.query('SHOW server_version_num')
      const version = parseInt(versionResult.rows[0].server_version_num, 10)
      const queryColumn = version < 90200 ? 'current_query' : 'query'

      const queryText = 'SELECT COUNT(*) as counts FROM pg_stat_activity WHERE ' + queryColumn + ' = $1'
      const queries = _.times(20, () => pool.query(queryText, [queryText]))
      const results = yield Promise.all(queries)
      const counts = results.map((res) => parseInt(res.rows[0].counts, 10))
      expect(counts).to.eql(_.times(20, (i) => 1))
      return yield pool.end()
    })
  )
})
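For reference, a sketch of how the sizing knob and the pool counters fit together in application code (the interval logger is purely illustrative):

```js
const Pool = require('pg-pool')

// max bounds the number of concurrent connections; additional connect()/query()
// calls queue up and show in pool.waitingCount until a client is released.
const pool = new Pool({ max: 5 })

setInterval(() => {
  console.log('total:', pool.totalCount, 'idle:', pool.idleCount, 'waiting:', pool.waitingCount)
}, 1000).unref()
```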
19
backend/apis/nodejs/node_modules/pg-pool/test/submittable.js
generated
vendored
Normal file
@ -0,0 +1,19 @@
'use strict'
const Cursor = require('pg-cursor')
const expect = require('expect.js')
const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('submittable', () => {
  it('is returned from the query method', false, (done) => {
    const pool = new Pool()
    const cursor = pool.query(new Cursor('SELECT * from generate_series(0, 1000)'))
    cursor.read((err, rows) => {
      expect(err).to.be(undefined)
      expect(!!rows).to.be.ok()
      cursor.close(done)
    })
  })
})
0
backend/apis/nodejs/node_modules/pg-pool/test/timeout.js
generated
vendored
Normal file
24
backend/apis/nodejs/node_modules/pg-pool/test/verify.js
generated
vendored
Normal file
@ -0,0 +1,24 @@
'use strict'
const expect = require('expect.js')

const describe = require('mocha').describe
const it = require('mocha').it

const Pool = require('../')

describe('verify', () => {
  it('verifies a client with a callback', (done) => {
    const pool = new Pool({
      verify: (client, cb) => {
        cb(new Error('nope'))
      },
    })

    pool.connect((err, client) => {
      expect(err).to.be.an(Error)
      expect(err.message).to.be('nope')
      pool.end()
      done()
    })
  })
})
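A sketch of a `verify` hook that pings each newly created connection before it is handed out. The success signature is assumed here to be a bare callback call, mirroring the error path used in the test above:

```js
const Pool = require('pg-pool')

const pool = new Pool({
  // Called for every new client; passing an error rejects that connect() attempt.
  verify: (client, callback) => {
    client.query('SELECT 1', (err) => callback(err))
  },
})
```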