mirror of
https://github.com/xfarrow/blink
synced 2025-06-27 09:03:02 +02:00
Change endpoint from persons to people
This commit is contained in:
26
backend/apis/nodejs/node_modules/minizlib/LICENSE
generated
vendored
Normal file
26
backend/apis/nodejs/node_modules/minizlib/LICENSE
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
Minizlib was created by Isaac Z. Schlueter.
|
||||
It is a derivative work of the Node.js project.
|
||||
|
||||
"""
|
||||
Copyright Isaac Z. Schlueter and Contributors
|
||||
Copyright Node.js contributors. All rights reserved.
|
||||
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the "Software"),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
"""
|
60
backend/apis/nodejs/node_modules/minizlib/README.md
generated
vendored
Normal file
60
backend/apis/nodejs/node_modules/minizlib/README.md
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
# minizlib
|
||||
|
||||
A fast zlib stream built on [minipass](http://npm.im/minipass) and
|
||||
Node.js's zlib binding.
|
||||
|
||||
This module was created to serve the needs of
|
||||
[node-tar](http://npm.im/tar) and
|
||||
[minipass-fetch](http://npm.im/minipass-fetch).
|
||||
|
||||
Brotli is supported in versions of node with a Brotli binding.
|
||||
|
||||
## How does this differ from the streams in `require('zlib')`?
|
||||
|
||||
First, there are no convenience methods to compress or decompress a
|
||||
buffer. If you want those, use the built-in `zlib` module. This is
|
||||
only streams. That being said, Minipass streams to make it fairly easy to
|
||||
use as one-liners: `new zlib.Deflate().end(data).read()` will return the
|
||||
deflate compressed result.
|
||||
|
||||
This module compresses and decompresses the data as fast as you feed
|
||||
it in. It is synchronous, and runs on the main process thread. Zlib
|
||||
and Brotli operations can be high CPU, but they're very fast, and doing it
|
||||
this way means much less bookkeeping and artificial deferral.
|
||||
|
||||
Node's built in zlib streams are built on top of `stream.Transform`.
|
||||
They do the maximally safe thing with respect to consistent
|
||||
asynchrony, buffering, and backpressure.
|
||||
|
||||
See [Minipass](http://npm.im/minipass) for more on the differences between
|
||||
Node.js core streams and Minipass streams, and the convenience methods
|
||||
provided by that class.
|
||||
|
||||
## Classes
|
||||
|
||||
- Deflate
|
||||
- Inflate
|
||||
- Gzip
|
||||
- Gunzip
|
||||
- DeflateRaw
|
||||
- InflateRaw
|
||||
- Unzip
|
||||
- BrotliCompress (Node v10 and higher)
|
||||
- BrotliDecompress (Node v10 and higher)
|
||||
|
||||
## USAGE
|
||||
|
||||
```js
|
||||
const zlib = require('minizlib')
|
||||
const input = sourceOfCompressedData()
|
||||
const decode = new zlib.BrotliDecompress()
|
||||
const output = whereToWriteTheDecodedData()
|
||||
input.pipe(decode).pipe(output)
|
||||
```
|
||||
|
||||
## REPRODUCIBLE BUILDS
|
||||
|
||||
To create reproducible gzip compressed files across different operating
|
||||
systems, set `portable: true` in the options. This causes minizlib to set
|
||||
the `OS` indicator in byte 9 of the extended gzip header to `0xFF` for
|
||||
'unknown'.
|
115
backend/apis/nodejs/node_modules/minizlib/constants.js
generated
vendored
Normal file
115
backend/apis/nodejs/node_modules/minizlib/constants.js
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
// Update with any zlib constants that are added or changed in the future.
|
||||
// Node v6 didn't export this, so we just hard code the version and rely
|
||||
// on all the other hard-coded values from zlib v4736. When node v6
|
||||
// support drops, we can just export the realZlibConstants object.
|
||||
const realZlibConstants = require('zlib').constants ||
|
||||
/* istanbul ignore next */ { ZLIB_VERNUM: 4736 }
|
||||
|
||||
module.exports = Object.freeze(Object.assign(Object.create(null), {
|
||||
Z_NO_FLUSH: 0,
|
||||
Z_PARTIAL_FLUSH: 1,
|
||||
Z_SYNC_FLUSH: 2,
|
||||
Z_FULL_FLUSH: 3,
|
||||
Z_FINISH: 4,
|
||||
Z_BLOCK: 5,
|
||||
Z_OK: 0,
|
||||
Z_STREAM_END: 1,
|
||||
Z_NEED_DICT: 2,
|
||||
Z_ERRNO: -1,
|
||||
Z_STREAM_ERROR: -2,
|
||||
Z_DATA_ERROR: -3,
|
||||
Z_MEM_ERROR: -4,
|
||||
Z_BUF_ERROR: -5,
|
||||
Z_VERSION_ERROR: -6,
|
||||
Z_NO_COMPRESSION: 0,
|
||||
Z_BEST_SPEED: 1,
|
||||
Z_BEST_COMPRESSION: 9,
|
||||
Z_DEFAULT_COMPRESSION: -1,
|
||||
Z_FILTERED: 1,
|
||||
Z_HUFFMAN_ONLY: 2,
|
||||
Z_RLE: 3,
|
||||
Z_FIXED: 4,
|
||||
Z_DEFAULT_STRATEGY: 0,
|
||||
DEFLATE: 1,
|
||||
INFLATE: 2,
|
||||
GZIP: 3,
|
||||
GUNZIP: 4,
|
||||
DEFLATERAW: 5,
|
||||
INFLATERAW: 6,
|
||||
UNZIP: 7,
|
||||
BROTLI_DECODE: 8,
|
||||
BROTLI_ENCODE: 9,
|
||||
Z_MIN_WINDOWBITS: 8,
|
||||
Z_MAX_WINDOWBITS: 15,
|
||||
Z_DEFAULT_WINDOWBITS: 15,
|
||||
Z_MIN_CHUNK: 64,
|
||||
Z_MAX_CHUNK: Infinity,
|
||||
Z_DEFAULT_CHUNK: 16384,
|
||||
Z_MIN_MEMLEVEL: 1,
|
||||
Z_MAX_MEMLEVEL: 9,
|
||||
Z_DEFAULT_MEMLEVEL: 8,
|
||||
Z_MIN_LEVEL: -1,
|
||||
Z_MAX_LEVEL: 9,
|
||||
Z_DEFAULT_LEVEL: -1,
|
||||
BROTLI_OPERATION_PROCESS: 0,
|
||||
BROTLI_OPERATION_FLUSH: 1,
|
||||
BROTLI_OPERATION_FINISH: 2,
|
||||
BROTLI_OPERATION_EMIT_METADATA: 3,
|
||||
BROTLI_MODE_GENERIC: 0,
|
||||
BROTLI_MODE_TEXT: 1,
|
||||
BROTLI_MODE_FONT: 2,
|
||||
BROTLI_DEFAULT_MODE: 0,
|
||||
BROTLI_MIN_QUALITY: 0,
|
||||
BROTLI_MAX_QUALITY: 11,
|
||||
BROTLI_DEFAULT_QUALITY: 11,
|
||||
BROTLI_MIN_WINDOW_BITS: 10,
|
||||
BROTLI_MAX_WINDOW_BITS: 24,
|
||||
BROTLI_LARGE_MAX_WINDOW_BITS: 30,
|
||||
BROTLI_DEFAULT_WINDOW: 22,
|
||||
BROTLI_MIN_INPUT_BLOCK_BITS: 16,
|
||||
BROTLI_MAX_INPUT_BLOCK_BITS: 24,
|
||||
BROTLI_PARAM_MODE: 0,
|
||||
BROTLI_PARAM_QUALITY: 1,
|
||||
BROTLI_PARAM_LGWIN: 2,
|
||||
BROTLI_PARAM_LGBLOCK: 3,
|
||||
BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4,
|
||||
BROTLI_PARAM_SIZE_HINT: 5,
|
||||
BROTLI_PARAM_LARGE_WINDOW: 6,
|
||||
BROTLI_PARAM_NPOSTFIX: 7,
|
||||
BROTLI_PARAM_NDIRECT: 8,
|
||||
BROTLI_DECODER_RESULT_ERROR: 0,
|
||||
BROTLI_DECODER_RESULT_SUCCESS: 1,
|
||||
BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2,
|
||||
BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3,
|
||||
BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0,
|
||||
BROTLI_DECODER_PARAM_LARGE_WINDOW: 1,
|
||||
BROTLI_DECODER_NO_ERROR: 0,
|
||||
BROTLI_DECODER_SUCCESS: 1,
|
||||
BROTLI_DECODER_NEEDS_MORE_INPUT: 2,
|
||||
BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3,
|
||||
BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1,
|
||||
BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2,
|
||||
BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3,
|
||||
BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4,
|
||||
BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5,
|
||||
BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6,
|
||||
BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7,
|
||||
BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8,
|
||||
BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9,
|
||||
BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10,
|
||||
BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11,
|
||||
BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12,
|
||||
BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13,
|
||||
BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14,
|
||||
BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15,
|
||||
BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16,
|
||||
BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19,
|
||||
BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20,
|
||||
BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21,
|
||||
BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22,
|
||||
BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25,
|
||||
BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26,
|
||||
BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27,
|
||||
BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30,
|
||||
BROTLI_DECODER_ERROR_UNREACHABLE: -31,
|
||||
}, realZlibConstants))
|
348
backend/apis/nodejs/node_modules/minizlib/index.js
generated
vendored
Normal file
348
backend/apis/nodejs/node_modules/minizlib/index.js
generated
vendored
Normal file
@ -0,0 +1,348 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const Buffer = require('buffer').Buffer
|
||||
const realZlib = require('zlib')
|
||||
|
||||
const constants = exports.constants = require('./constants.js')
|
||||
const Minipass = require('minipass')
|
||||
|
||||
const OriginalBufferConcat = Buffer.concat
|
||||
|
||||
const _superWrite = Symbol('_superWrite')
|
||||
class ZlibError extends Error {
|
||||
constructor (err) {
|
||||
super('zlib: ' + err.message)
|
||||
this.code = err.code
|
||||
this.errno = err.errno
|
||||
/* istanbul ignore if */
|
||||
if (!this.code)
|
||||
this.code = 'ZLIB_ERROR'
|
||||
|
||||
this.message = 'zlib: ' + err.message
|
||||
Error.captureStackTrace(this, this.constructor)
|
||||
}
|
||||
|
||||
get name () {
|
||||
return 'ZlibError'
|
||||
}
|
||||
}
|
||||
|
||||
// the Zlib class they all inherit from
|
||||
// This thing manages the queue of requests, and returns
|
||||
// true or false if there is anything in the queue when
|
||||
// you call the .write() method.
|
||||
const _opts = Symbol('opts')
|
||||
const _flushFlag = Symbol('flushFlag')
|
||||
const _finishFlushFlag = Symbol('finishFlushFlag')
|
||||
const _fullFlushFlag = Symbol('fullFlushFlag')
|
||||
const _handle = Symbol('handle')
|
||||
const _onError = Symbol('onError')
|
||||
const _sawError = Symbol('sawError')
|
||||
const _level = Symbol('level')
|
||||
const _strategy = Symbol('strategy')
|
||||
const _ended = Symbol('ended')
|
||||
const _defaultFullFlush = Symbol('_defaultFullFlush')
|
||||
|
||||
class ZlibBase extends Minipass {
|
||||
constructor (opts, mode) {
|
||||
if (!opts || typeof opts !== 'object')
|
||||
throw new TypeError('invalid options for ZlibBase constructor')
|
||||
|
||||
super(opts)
|
||||
this[_sawError] = false
|
||||
this[_ended] = false
|
||||
this[_opts] = opts
|
||||
|
||||
this[_flushFlag] = opts.flush
|
||||
this[_finishFlushFlag] = opts.finishFlush
|
||||
// this will throw if any options are invalid for the class selected
|
||||
try {
|
||||
this[_handle] = new realZlib[mode](opts)
|
||||
} catch (er) {
|
||||
// make sure that all errors get decorated properly
|
||||
throw new ZlibError(er)
|
||||
}
|
||||
|
||||
this[_onError] = (err) => {
|
||||
// no sense raising multiple errors, since we abort on the first one.
|
||||
if (this[_sawError])
|
||||
return
|
||||
|
||||
this[_sawError] = true
|
||||
|
||||
// there is no way to cleanly recover.
|
||||
// continuing only obscures problems.
|
||||
this.close()
|
||||
this.emit('error', err)
|
||||
}
|
||||
|
||||
this[_handle].on('error', er => this[_onError](new ZlibError(er)))
|
||||
this.once('end', () => this.close)
|
||||
}
|
||||
|
||||
close () {
|
||||
if (this[_handle]) {
|
||||
this[_handle].close()
|
||||
this[_handle] = null
|
||||
this.emit('close')
|
||||
}
|
||||
}
|
||||
|
||||
reset () {
|
||||
if (!this[_sawError]) {
|
||||
assert(this[_handle], 'zlib binding closed')
|
||||
return this[_handle].reset()
|
||||
}
|
||||
}
|
||||
|
||||
flush (flushFlag) {
|
||||
if (this.ended)
|
||||
return
|
||||
|
||||
if (typeof flushFlag !== 'number')
|
||||
flushFlag = this[_fullFlushFlag]
|
||||
this.write(Object.assign(Buffer.alloc(0), { [_flushFlag]: flushFlag }))
|
||||
}
|
||||
|
||||
end (chunk, encoding, cb) {
|
||||
if (chunk)
|
||||
this.write(chunk, encoding)
|
||||
this.flush(this[_finishFlushFlag])
|
||||
this[_ended] = true
|
||||
return super.end(null, null, cb)
|
||||
}
|
||||
|
||||
get ended () {
|
||||
return this[_ended]
|
||||
}
|
||||
|
||||
write (chunk, encoding, cb) {
|
||||
// process the chunk using the sync process
|
||||
// then super.write() all the outputted chunks
|
||||
if (typeof encoding === 'function')
|
||||
cb = encoding, encoding = 'utf8'
|
||||
|
||||
if (typeof chunk === 'string')
|
||||
chunk = Buffer.from(chunk, encoding)
|
||||
|
||||
if (this[_sawError])
|
||||
return
|
||||
assert(this[_handle], 'zlib binding closed')
|
||||
|
||||
// _processChunk tries to .close() the native handle after it's done, so we
|
||||
// intercept that by temporarily making it a no-op.
|
||||
const nativeHandle = this[_handle]._handle
|
||||
const originalNativeClose = nativeHandle.close
|
||||
nativeHandle.close = () => {}
|
||||
const originalClose = this[_handle].close
|
||||
this[_handle].close = () => {}
|
||||
// It also calls `Buffer.concat()` at the end, which may be convenient
|
||||
// for some, but which we are not interested in as it slows us down.
|
||||
Buffer.concat = (args) => args
|
||||
let result
|
||||
try {
|
||||
const flushFlag = typeof chunk[_flushFlag] === 'number'
|
||||
? chunk[_flushFlag] : this[_flushFlag]
|
||||
result = this[_handle]._processChunk(chunk, flushFlag)
|
||||
// if we don't throw, reset it back how it was
|
||||
Buffer.concat = OriginalBufferConcat
|
||||
} catch (err) {
|
||||
// or if we do, put Buffer.concat() back before we emit error
|
||||
// Error events call into user code, which may call Buffer.concat()
|
||||
Buffer.concat = OriginalBufferConcat
|
||||
this[_onError](new ZlibError(err))
|
||||
} finally {
|
||||
if (this[_handle]) {
|
||||
// Core zlib resets `_handle` to null after attempting to close the
|
||||
// native handle. Our no-op handler prevented actual closure, but we
|
||||
// need to restore the `._handle` property.
|
||||
this[_handle]._handle = nativeHandle
|
||||
nativeHandle.close = originalNativeClose
|
||||
this[_handle].close = originalClose
|
||||
// `_processChunk()` adds an 'error' listener. If we don't remove it
|
||||
// after each call, these handlers start piling up.
|
||||
this[_handle].removeAllListeners('error')
|
||||
// make sure OUR error listener is still attached tho
|
||||
}
|
||||
}
|
||||
|
||||
if (this[_handle])
|
||||
this[_handle].on('error', er => this[_onError](new ZlibError(er)))
|
||||
|
||||
let writeReturn
|
||||
if (result) {
|
||||
if (Array.isArray(result) && result.length > 0) {
|
||||
// The first buffer is always `handle._outBuffer`, which would be
|
||||
// re-used for later invocations; so, we always have to copy that one.
|
||||
writeReturn = this[_superWrite](Buffer.from(result[0]))
|
||||
for (let i = 1; i < result.length; i++) {
|
||||
writeReturn = this[_superWrite](result[i])
|
||||
}
|
||||
} else {
|
||||
writeReturn = this[_superWrite](Buffer.from(result))
|
||||
}
|
||||
}
|
||||
|
||||
if (cb)
|
||||
cb()
|
||||
return writeReturn
|
||||
}
|
||||
|
||||
[_superWrite] (data) {
|
||||
return super.write(data)
|
||||
}
|
||||
}
|
||||
|
||||
class Zlib extends ZlibBase {
|
||||
constructor (opts, mode) {
|
||||
opts = opts || {}
|
||||
|
||||
opts.flush = opts.flush || constants.Z_NO_FLUSH
|
||||
opts.finishFlush = opts.finishFlush || constants.Z_FINISH
|
||||
super(opts, mode)
|
||||
|
||||
this[_fullFlushFlag] = constants.Z_FULL_FLUSH
|
||||
this[_level] = opts.level
|
||||
this[_strategy] = opts.strategy
|
||||
}
|
||||
|
||||
params (level, strategy) {
|
||||
if (this[_sawError])
|
||||
return
|
||||
|
||||
if (!this[_handle])
|
||||
throw new Error('cannot switch params when binding is closed')
|
||||
|
||||
// no way to test this without also not supporting params at all
|
||||
/* istanbul ignore if */
|
||||
if (!this[_handle].params)
|
||||
throw new Error('not supported in this implementation')
|
||||
|
||||
if (this[_level] !== level || this[_strategy] !== strategy) {
|
||||
this.flush(constants.Z_SYNC_FLUSH)
|
||||
assert(this[_handle], 'zlib binding closed')
|
||||
// .params() calls .flush(), but the latter is always async in the
|
||||
// core zlib. We override .flush() temporarily to intercept that and
|
||||
// flush synchronously.
|
||||
const origFlush = this[_handle].flush
|
||||
this[_handle].flush = (flushFlag, cb) => {
|
||||
this.flush(flushFlag)
|
||||
cb()
|
||||
}
|
||||
try {
|
||||
this[_handle].params(level, strategy)
|
||||
} finally {
|
||||
this[_handle].flush = origFlush
|
||||
}
|
||||
/* istanbul ignore else */
|
||||
if (this[_handle]) {
|
||||
this[_level] = level
|
||||
this[_strategy] = strategy
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// minimal 2-byte header
|
||||
class Deflate extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'Deflate')
|
||||
}
|
||||
}
|
||||
|
||||
class Inflate extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'Inflate')
|
||||
}
|
||||
}
|
||||
|
||||
// gzip - bigger header, same deflate compression
|
||||
const _portable = Symbol('_portable')
|
||||
class Gzip extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'Gzip')
|
||||
this[_portable] = opts && !!opts.portable
|
||||
}
|
||||
|
||||
[_superWrite] (data) {
|
||||
if (!this[_portable])
|
||||
return super[_superWrite](data)
|
||||
|
||||
// we'll always get the header emitted in one first chunk
|
||||
// overwrite the OS indicator byte with 0xFF
|
||||
this[_portable] = false
|
||||
data[9] = 255
|
||||
return super[_superWrite](data)
|
||||
}
|
||||
}
|
||||
|
||||
class Gunzip extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'Gunzip')
|
||||
}
|
||||
}
|
||||
|
||||
// raw - no header
|
||||
class DeflateRaw extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'DeflateRaw')
|
||||
}
|
||||
}
|
||||
|
||||
class InflateRaw extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'InflateRaw')
|
||||
}
|
||||
}
|
||||
|
||||
// auto-detect header.
|
||||
class Unzip extends Zlib {
|
||||
constructor (opts) {
|
||||
super(opts, 'Unzip')
|
||||
}
|
||||
}
|
||||
|
||||
class Brotli extends ZlibBase {
|
||||
constructor (opts, mode) {
|
||||
opts = opts || {}
|
||||
|
||||
opts.flush = opts.flush || constants.BROTLI_OPERATION_PROCESS
|
||||
opts.finishFlush = opts.finishFlush || constants.BROTLI_OPERATION_FINISH
|
||||
|
||||
super(opts, mode)
|
||||
|
||||
this[_fullFlushFlag] = constants.BROTLI_OPERATION_FLUSH
|
||||
}
|
||||
}
|
||||
|
||||
class BrotliCompress extends Brotli {
|
||||
constructor (opts) {
|
||||
super(opts, 'BrotliCompress')
|
||||
}
|
||||
}
|
||||
|
||||
class BrotliDecompress extends Brotli {
|
||||
constructor (opts) {
|
||||
super(opts, 'BrotliDecompress')
|
||||
}
|
||||
}
|
||||
|
||||
exports.Deflate = Deflate
|
||||
exports.Inflate = Inflate
|
||||
exports.Gzip = Gzip
|
||||
exports.Gunzip = Gunzip
|
||||
exports.DeflateRaw = DeflateRaw
|
||||
exports.InflateRaw = InflateRaw
|
||||
exports.Unzip = Unzip
|
||||
/* istanbul ignore else */
|
||||
if (typeof realZlib.BrotliCompress === 'function') {
|
||||
exports.BrotliCompress = BrotliCompress
|
||||
exports.BrotliDecompress = BrotliDecompress
|
||||
} else {
|
||||
exports.BrotliCompress = exports.BrotliDecompress = class {
|
||||
constructor () {
|
||||
throw new Error('Brotli is not supported in this version of Node.js')
|
||||
}
|
||||
}
|
||||
}
|
15
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/LICENSE
generated
vendored
Normal file
15
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/LICENSE
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
The ISC License
|
||||
|
||||
Copyright (c) 2017-2022 npm, Inc., Isaac Z. Schlueter, and Contributors
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
|
||||
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
728
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/README.md
generated
vendored
Normal file
728
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/README.md
generated
vendored
Normal file
@ -0,0 +1,728 @@
|
||||
# minipass
|
||||
|
||||
A _very_ minimal implementation of a [PassThrough
|
||||
stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
|
||||
|
||||
[It's very
|
||||
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
|
||||
for objects, strings, and buffers.
|
||||
|
||||
Supports `pipe()`ing (including multi-`pipe()` and backpressure transmission),
|
||||
buffering data until either a `data` event handler or `pipe()` is added (so
|
||||
you don't lose the first chunk), and most other cases where PassThrough is
|
||||
a good idea.
|
||||
|
||||
There is a `read()` method, but it's much more efficient to consume data
|
||||
from this stream via `'data'` events or by calling `pipe()` into some other
|
||||
stream. Calling `read()` requires the buffer to be flattened in some
|
||||
cases, which requires copying memory.
|
||||
|
||||
If you set `objectMode: true` in the options, then whatever is written will
|
||||
be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
|
||||
ensure proper Streams semantics when `read(n)` is called.
|
||||
|
||||
`objectMode` can also be set by doing `stream.objectMode = true`, or by
|
||||
writing any non-string/non-buffer data. `objectMode` cannot be set to
|
||||
false once it is set.
|
||||
|
||||
This is not a `through` or `through2` stream. It doesn't transform the
|
||||
data, it just passes it right through. If you want to transform the data,
|
||||
extend the class, and override the `write()` method. Once you're done
|
||||
transforming the data however you want, call `super.write()` with the
|
||||
transform output.
|
||||
|
||||
For some examples of streams that extend Minipass in various ways, check
|
||||
out:
|
||||
|
||||
- [minizlib](http://npm.im/minizlib)
|
||||
- [fs-minipass](http://npm.im/fs-minipass)
|
||||
- [tar](http://npm.im/tar)
|
||||
- [minipass-collect](http://npm.im/minipass-collect)
|
||||
- [minipass-flush](http://npm.im/minipass-flush)
|
||||
- [minipass-pipeline](http://npm.im/minipass-pipeline)
|
||||
- [tap](http://npm.im/tap)
|
||||
- [tap-parser](http://npm.im/tap-parser)
|
||||
- [treport](http://npm.im/treport)
|
||||
- [minipass-fetch](http://npm.im/minipass-fetch)
|
||||
- [pacote](http://npm.im/pacote)
|
||||
- [make-fetch-happen](http://npm.im/make-fetch-happen)
|
||||
- [cacache](http://npm.im/cacache)
|
||||
- [ssri](http://npm.im/ssri)
|
||||
- [npm-registry-fetch](http://npm.im/npm-registry-fetch)
|
||||
- [minipass-json-stream](http://npm.im/minipass-json-stream)
|
||||
- [minipass-sized](http://npm.im/minipass-sized)
|
||||
|
||||
## Differences from Node.js Streams
|
||||
|
||||
There are several things that make Minipass streams different from (and in
|
||||
some ways superior to) Node.js core streams.
|
||||
|
||||
Please read these caveats if you are familiar with node-core streams and
|
||||
intend to use Minipass streams in your programs.
|
||||
|
||||
You can avoid most of these differences entirely (for a very
|
||||
small performance penalty) by setting `{async: true}` in the
|
||||
constructor options.
|
||||
|
||||
### Timing
|
||||
|
||||
Minipass streams are designed to support synchronous use-cases. Thus, data
|
||||
is emitted as soon as it is available, always. It is buffered until read,
|
||||
but no longer. Another way to look at it is that Minipass streams are
|
||||
exactly as synchronous as the logic that writes into them.
|
||||
|
||||
This can be surprising if your code relies on `PassThrough.write()` always
|
||||
providing data on the next tick rather than the current one, or being able
|
||||
to call `resume()` and not have the entire buffer disappear immediately.
|
||||
|
||||
However, without this synchronicity guarantee, there would be no way for
|
||||
Minipass to achieve the speeds it does, or support the synchronous use
|
||||
cases that it does. Simply put, waiting takes time.
|
||||
|
||||
This non-deferring approach makes Minipass streams much easier to reason
|
||||
about, especially in the context of Promises and other flow-control
|
||||
mechanisms.
|
||||
|
||||
Example:
|
||||
|
||||
```js
|
||||
const Minipass = require('minipass')
|
||||
const stream = new Minipass({ async: true })
|
||||
stream.on('data', () => console.log('data event'))
|
||||
console.log('before write')
|
||||
stream.write('hello')
|
||||
console.log('after write')
|
||||
// output:
|
||||
// before write
|
||||
// data event
|
||||
// after write
|
||||
```
|
||||
|
||||
### Exception: Async Opt-In
|
||||
|
||||
If you wish to have a Minipass stream with behavior that more
|
||||
closely mimics Node.js core streams, you can set the stream in
|
||||
async mode either by setting `async: true` in the constructor
|
||||
options, or by setting `stream.async = true` later on.
|
||||
|
||||
```js
|
||||
const Minipass = require('minipass')
|
||||
const asyncStream = new Minipass({ async: true })
|
||||
asyncStream.on('data', () => console.log('data event'))
|
||||
console.log('before write')
|
||||
asyncStream.write('hello')
|
||||
console.log('after write')
|
||||
// output:
|
||||
// before write
|
||||
// after write
|
||||
// data event <-- this is deferred until the next tick
|
||||
```
|
||||
|
||||
Switching _out_ of async mode is unsafe, as it could cause data
|
||||
corruption, and so is not enabled. Example:
|
||||
|
||||
```js
|
||||
const Minipass = require('minipass')
|
||||
const stream = new Minipass({ encoding: 'utf8' })
|
||||
stream.on('data', chunk => console.log(chunk))
|
||||
stream.async = true
|
||||
console.log('before writes')
|
||||
stream.write('hello')
|
||||
setStreamSyncAgainSomehow(stream) // <-- this doesn't actually exist!
|
||||
stream.write('world')
|
||||
console.log('after writes')
|
||||
// hypothetical output would be:
|
||||
// before writes
|
||||
// world
|
||||
// after writes
|
||||
// hello
|
||||
// NOT GOOD!
|
||||
```
|
||||
|
||||
To avoid this problem, once set into async mode, any attempt to
|
||||
make the stream sync again will be ignored.
|
||||
|
||||
```js
|
||||
const Minipass = require('minipass')
|
||||
const stream = new Minipass({ encoding: 'utf8' })
|
||||
stream.on('data', chunk => console.log(chunk))
|
||||
stream.async = true
|
||||
console.log('before writes')
|
||||
stream.write('hello')
|
||||
stream.async = false // <-- no-op, stream already async
|
||||
stream.write('world')
|
||||
console.log('after writes')
|
||||
// actual output:
|
||||
// before writes
|
||||
// after writes
|
||||
// hello
|
||||
// world
|
||||
```
|
||||
|
||||
### No High/Low Water Marks
|
||||
|
||||
Node.js core streams will optimistically fill up a buffer, returning `true`
|
||||
on all writes until the limit is hit, even if the data has nowhere to go.
|
||||
Then, they will not attempt to draw more data in until the buffer size dips
|
||||
below a minimum value.
|
||||
|
||||
Minipass streams are much simpler. The `write()` method will return `true`
|
||||
if the data has somewhere to go (which is to say, given the timing
|
||||
guarantees, that the data is already there by the time `write()` returns).
|
||||
|
||||
If the data has nowhere to go, then `write()` returns false, and the data
|
||||
sits in a buffer, to be drained out immediately as soon as anyone consumes
|
||||
it.
|
||||
|
||||
Since nothing is ever buffered unnecessarily, there is much less
|
||||
copying data, and less bookkeeping about buffer capacity levels.
|
||||
|
||||
### Hazards of Buffering (or: Why Minipass Is So Fast)
|
||||
|
||||
Since data written to a Minipass stream is immediately written all the way
|
||||
through the pipeline, and `write()` always returns true/false based on
|
||||
whether the data was fully flushed, backpressure is communicated
|
||||
immediately to the upstream caller. This minimizes buffering.
|
||||
|
||||
Consider this case:
|
||||
|
||||
```js
|
||||
const {PassThrough} = require('stream')
|
||||
const p1 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p2 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p3 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p4 = new PassThrough({ highWaterMark: 1024 })
|
||||
|
||||
p1.pipe(p2).pipe(p3).pipe(p4)
|
||||
p4.on('data', () => console.log('made it through'))
|
||||
|
||||
// this returns false and buffers, then writes to p2 on next tick (1)
|
||||
// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
|
||||
// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
|
||||
// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
|
||||
// on next tick (4)
|
||||
// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
|
||||
// 'drain' on next tick (5)
|
||||
// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
|
||||
// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
|
||||
// tick (7)
|
||||
|
||||
p1.write(Buffer.alloc(2048)) // returns false
|
||||
```
|
||||
|
||||
Along the way, the data was buffered and deferred at each stage, and
|
||||
multiple event deferrals happened, for an unblocked pipeline where it was
|
||||
perfectly safe to write all the way through!
|
||||
|
||||
Furthermore, setting a `highWaterMark` of `1024` might lead someone reading
|
||||
the code to think an advisory maximum of 1KiB is being set for the
|
||||
pipeline. However, the actual advisory buffering level is the _sum_ of
|
||||
`highWaterMark` values, since each one has its own bucket.
|
||||
|
||||
Consider the Minipass case:
|
||||
|
||||
```js
|
||||
const m1 = new Minipass()
|
||||
const m2 = new Minipass()
|
||||
const m3 = new Minipass()
|
||||
const m4 = new Minipass()
|
||||
|
||||
m1.pipe(m2).pipe(m3).pipe(m4)
|
||||
m4.on('data', () => console.log('made it through'))
|
||||
|
||||
// m1 is flowing, so it writes the data to m2 immediately
|
||||
// m2 is flowing, so it writes the data to m3 immediately
|
||||
// m3 is flowing, so it writes the data to m4 immediately
|
||||
// m4 is flowing, so it fires the 'data' event immediately, returns true
|
||||
// m4's write returned true, so m3 is still flowing, returns true
|
||||
// m3's write returned true, so m2 is still flowing, returns true
|
||||
// m2's write returned true, so m1 is still flowing, returns true
|
||||
// No event deferrals or buffering along the way!
|
||||
|
||||
m1.write(Buffer.alloc(2048)) // returns true
|
||||
```
|
||||
|
||||
It is extremely unlikely that you _don't_ want to buffer any data written,
|
||||
or _ever_ buffer data that can be flushed all the way through. Neither
|
||||
node-core streams nor Minipass ever fail to buffer written data, but
|
||||
node-core streams do a lot of unnecessary buffering and pausing.
|
||||
|
||||
As always, the faster implementation is the one that does less stuff and
|
||||
waits less time to do it.
|
||||
|
||||
### Immediately emit `end` for empty streams (when not paused)
|
||||
|
||||
If a stream is not paused, and `end()` is called before writing any data
|
||||
into it, then it will emit `end` immediately.
|
||||
|
||||
If you have logic that occurs on the `end` event which you don't want to
|
||||
potentially happen immediately (for example, closing file descriptors,
|
||||
moving on to the next entry in an archive parse stream, etc.) then be sure
|
||||
to call `stream.pause()` on creation, and then `stream.resume()` once you
|
||||
are ready to respond to the `end` event.
|
||||
|
||||
However, this is _usually_ not a problem because:
|
||||
|
||||
### Emit `end` When Asked
|
||||
|
||||
One hazard of immediately emitting `'end'` is that you may not yet have had
|
||||
a chance to add a listener. In order to avoid this hazard, Minipass
|
||||
streams safely re-emit the `'end'` event if a new listener is added after
|
||||
`'end'` has been emitted.
|
||||
|
||||
Ie, if you do `stream.on('end', someFunction)`, and the stream has already
|
||||
emitted `end`, then it will call the handler right away. (You can think of
|
||||
this somewhat like attaching a new `.then(fn)` to a previously-resolved
|
||||
Promise.)
|
||||
|
||||
To prevent calling handlers multiple times who would not expect multiple
|
||||
ends to occur, all listeners are removed from the `'end'` event whenever it
|
||||
is emitted.
|
||||
|
||||
### Emit `error` When Asked
|
||||
|
||||
The most recent error object passed to the `'error'` event is
|
||||
stored on the stream. If a new `'error'` event handler is added,
|
||||
and an error was previously emitted, then the event handler will
|
||||
be called immediately (or on `process.nextTick` in the case of
|
||||
async streams).
|
||||
|
||||
This makes it much more difficult to end up trying to interact
|
||||
with a broken stream, if the error handler is added after an
|
||||
error was previously emitted.
|
||||
|
||||
### Impact of "immediate flow" on Tee-streams
|
||||
|
||||
A "tee stream" is a stream piping to multiple destinations:
|
||||
|
||||
```js
|
||||
const tee = new Minipass()
|
||||
tee.pipe(dest1)
|
||||
tee.pipe(dest2)
|
||||
tee.write('foo') // goes to both destinations
|
||||
```
|
||||
|
||||
Since Minipass streams _immediately_ process any pending data through the
|
||||
pipeline when a new pipe destination is added, this can have surprising
|
||||
effects, especially when a stream comes in from some other function and may
|
||||
or may not have data in its buffer.
|
||||
|
||||
```js
|
||||
// WARNING! WILL LOSE DATA!
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
|
||||
src.pipe(dest2) // gets nothing!
|
||||
```
|
||||
|
||||
One solution is to create a dedicated tee-stream junction that pipes to
|
||||
both locations, and then pipe to _that_ instead.
|
||||
|
||||
```js
|
||||
// Safe example: tee to both places
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
const tee = new Minipass()
|
||||
tee.pipe(dest1)
|
||||
tee.pipe(dest2)
|
||||
src.pipe(tee) // tee gets 'foo', pipes to both locations
|
||||
```
|
||||
|
||||
The same caveat applies to `on('data')` event listeners. The first one
|
||||
added will _immediately_ receive all of the data, leaving nothing for the
|
||||
second:
|
||||
|
||||
```js
|
||||
// WARNING! WILL LOSE DATA!
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
src.on('data', handler1) // receives 'foo' right away
|
||||
src.on('data', handler2) // nothing to see here!
|
||||
```
|
||||
|
||||
Using a dedicated tee-stream can be used in this case as well:
|
||||
|
||||
```js
|
||||
// Safe example: tee to both data handlers
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
const tee = new Minipass()
|
||||
tee.on('data', handler1)
|
||||
tee.on('data', handler2)
|
||||
src.pipe(tee)
|
||||
```
|
||||
|
||||
All of the hazards in this section are avoided by setting `{
|
||||
async: true }` in the Minipass constructor, or by setting
|
||||
`stream.async = true` afterwards. Note that this does add some
|
||||
overhead, so should only be done in cases where you are willing
|
||||
to lose a bit of performance in order to avoid having to refactor
|
||||
program logic.
|
||||
|
||||
## USAGE
|
||||
|
||||
It's a stream! Use it like a stream and it'll most likely do what you
|
||||
want.
|
||||
|
||||
```js
|
||||
const Minipass = require('minipass')
|
||||
const mp = new Minipass(options) // optional: { encoding, objectMode }
|
||||
mp.write('foo')
|
||||
mp.pipe(someOtherStream)
|
||||
mp.end('bar')
|
||||
```
|
||||
|
||||
### OPTIONS
|
||||
|
||||
* `encoding` How would you like the data coming _out_ of the stream to be
|
||||
encoded? Accepts any values that can be passed to `Buffer.toString()`.
|
||||
* `objectMode` Emit data exactly as it comes in. This will be flipped on
|
||||
by default if you write() something other than a string or Buffer at any
|
||||
point. Setting `objectMode: true` will prevent setting any encoding
|
||||
value.
|
||||
* `async` Defaults to `false`. Set to `true` to defer data
|
||||
emission until next tick. This reduces performance slightly,
|
||||
but makes Minipass streams use timing behavior closer to Node
|
||||
core streams. See [Timing](#timing) for more details.
|
||||
|
||||
### API
|
||||
|
||||
Implements the user-facing portions of Node.js's `Readable` and `Writable`
|
||||
streams.
|
||||
|
||||
### Methods
|
||||
|
||||
* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
|
||||
base Minipass class, the same data will come out.) Returns `false` if
|
||||
the stream will buffer the next write, or true if it's still in "flowing"
|
||||
mode.
|
||||
* `end([chunk, [encoding]], [callback])` - Signal that you have no more
|
||||
data to write. This will queue an `end` event to be fired when all the
|
||||
data has been consumed.
|
||||
* `setEncoding(encoding)` - Set the encoding for data coming out of the stream.
|
||||
This can only be done once.
|
||||
* `pause()` - No more data for a while, please. This also prevents `end`
|
||||
from being emitted for empty streams until the stream is resumed.
|
||||
* `resume()` - Resume the stream. If there's data in the buffer, it is all
|
||||
discarded. Any buffered events are immediately emitted.
|
||||
* `pipe(dest)` - Send all output to the stream provided. When
|
||||
data is emitted, it is immediately written to any and all pipe
|
||||
destinations. (Or written on next tick in `async` mode.)
|
||||
* `unpipe(dest)` - Stop piping to the destination stream. This
|
||||
is immediate, meaning that any asynchronously queued data will
|
||||
_not_ make it to the destination when running in `async` mode.
|
||||
* `options.end` - Boolean, end the destination stream when
|
||||
the source stream ends. Default `true`.
|
||||
* `options.proxyErrors` - Boolean, proxy `error` events from
|
||||
the source stream to the destination stream. Note that
|
||||
errors are _not_ proxied after the pipeline terminates,
|
||||
either due to the source emitting `'end'` or manually
|
||||
unpiping with `src.unpipe(dest)`. Default `false`.
|
||||
* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
|
||||
events are given special treatment, however. (See below under "events".)
|
||||
* `promise()` - Returns a Promise that resolves when the stream emits
|
||||
`end`, or rejects if the stream emits `error`.
|
||||
* `collect()` - Return a Promise that resolves on `end` with an array
|
||||
containing each chunk of data that was emitted, or rejects if the stream
|
||||
emits `error`. Note that this consumes the stream data.
|
||||
* `concat()` - Same as `collect()`, but concatenates the data into a single
|
||||
Buffer object. Will reject the returned promise if the stream is in
|
||||
objectMode, or if it goes into objectMode by the end of the data.
|
||||
* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
|
||||
provided, then consume all of it. If `n` bytes are not available, then
|
||||
it returns null. **Note** consuming streams in this way is less
|
||||
efficient, and can lead to unnecessary Buffer copying.
|
||||
* `destroy([er])` - Destroy the stream. If an error is provided, then an
|
||||
`'error'` event is emitted. If the stream has a `close()` method, and
|
||||
has not emitted a `'close'` event yet, then `stream.close()` will be
|
||||
called. Any Promises returned by `.promise()`, `.collect()` or
|
||||
`.concat()` will be rejected. After being destroyed, writing to the
|
||||
stream will emit an error. No more data will be emitted if the stream is
|
||||
destroyed, even if it was previously buffered.
|
||||
|
||||
### Properties
|
||||
|
||||
* `bufferLength` Read-only. Total number of bytes buffered, or in the case
|
||||
of objectMode, the total number of objects.
|
||||
* `encoding` The encoding that has been set. (Setting this is equivalent
|
||||
to calling `setEncoding(enc)` and has the same prohibition against
|
||||
setting multiple times.)
|
||||
* `flowing` Read-only. Boolean indicating whether a chunk written to the
|
||||
stream will be immediately emitted.
|
||||
* `emittedEnd` Read-only. Boolean indicating whether the end-ish events
|
||||
(ie, `end`, `prefinish`, `finish`) have been emitted. Note that
|
||||
listening on any end-ish event will immediately re-emit it if it has
|
||||
already been emitted.
|
||||
* `writable` Whether the stream is writable. Default `true`. Set to
|
||||
`false` when `end()`
|
||||
* `readable` Whether the stream is readable. Default `true`.
|
||||
* `buffer` A [yallist](http://npm.im/yallist) linked list of chunks written
|
||||
to the stream that have not yet been emitted. (It's probably a bad idea
|
||||
to mess with this.)
|
||||
* `pipes` A [yallist](http://npm.im/yallist) linked list of streams that
|
||||
this stream is piping into. (It's probably a bad idea to mess with
|
||||
this.)
|
||||
* `destroyed` A getter that indicates whether the stream was destroyed.
|
||||
* `paused` True if the stream has been explicitly paused, otherwise false.
|
||||
* `objectMode` Indicates whether the stream is in `objectMode`. Once set
|
||||
to `true`, it cannot be set to `false`.
|
||||
|
||||
### Events
|
||||
|
||||
* `data` Emitted when there's data to read. Argument is the data to read.
|
||||
This is never emitted while not flowing. If a listener is attached, that
|
||||
will resume the stream.
|
||||
* `end` Emitted when there's no more data to read. This will be emitted
|
||||
immediately for empty streams when `end()` is called. If a listener is
|
||||
attached, and `end` was already emitted, then it will be emitted again.
|
||||
All listeners are removed when `end` is emitted.
|
||||
* `prefinish` An end-ish event that follows the same logic as `end` and is
|
||||
emitted in the same conditions where `end` is emitted. Emitted after
|
||||
`'end'`.
|
||||
* `finish` An end-ish event that follows the same logic as `end` and is
|
||||
emitted in the same conditions where `end` is emitted. Emitted after
|
||||
`'prefinish'`.
|
||||
* `close` An indication that an underlying resource has been released.
|
||||
Minipass does not emit this event, but will defer it until after `end`
|
||||
has been emitted, since it throws off some stream libraries otherwise.
|
||||
* `drain` Emitted when the internal buffer empties, and it is again
|
||||
suitable to `write()` into the stream.
|
||||
* `readable` Emitted when data is buffered and ready to be read by a
|
||||
consumer.
|
||||
* `resume` Emitted when stream changes state from buffering to flowing
|
||||
mode. (Ie, when `resume` is called, `pipe` is called, or a `data` event
|
||||
listener is added.)
|
||||
|
||||
### Static Methods
|
||||
|
||||
* `Minipass.isStream(stream)` Returns `true` if the argument is a stream,
|
||||
and false otherwise. To be considered a stream, the object must be
|
||||
either an instance of Minipass, or an EventEmitter that has either a
|
||||
`pipe()` method, or both `write()` and `end()` methods. (Pretty much any
|
||||
stream in node-land will return `true` for this.)
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
Here are some examples of things you can do with Minipass streams.
|
||||
|
||||
### simple "are you done yet" promise
|
||||
|
||||
```js
|
||||
mp.promise().then(() => {
|
||||
// stream is finished
|
||||
}, er => {
|
||||
// stream emitted an error
|
||||
})
|
||||
```
|
||||
|
||||
### collecting
|
||||
|
||||
```js
|
||||
mp.collect().then(all => {
|
||||
// all is an array of all the data emitted
|
||||
// encoding is supported in this case, so
|
||||
  // the result will be a collection of strings if
|
||||
// an encoding is specified, or buffers/objects if not.
|
||||
//
|
||||
// In an async function, you may do
|
||||
// const data = await stream.collect()
|
||||
})
|
||||
```
|
||||
|
||||
### collecting into a single blob
|
||||
|
||||
This is a bit slower because it concatenates the data into one chunk for
|
||||
you, but if you're going to do it yourself anyway, it's convenient this
|
||||
way:
|
||||
|
||||
```js
|
||||
mp.concat().then(onebigchunk => {
|
||||
// onebigchunk is a string if the stream
|
||||
// had an encoding set, or a buffer otherwise.
|
||||
})
|
||||
```
|
||||
|
||||
### iteration
|
||||
|
||||
You can iterate over streams synchronously or asynchronously in platforms
|
||||
that support it.
|
||||
|
||||
Synchronous iteration will end when the currently available data is
|
||||
consumed, even if the `end` event has not been reached. In string and
|
||||
buffer mode, the data is concatenated, so unless multiple writes are
|
||||
occurring in the same tick as the `read()`, sync iteration loops will
|
||||
generally only have a single iteration.
|
||||
|
||||
To consume chunks in this way exactly as they have been written, with no
|
||||
flattening, create the stream with the `{ objectMode: true }` option.
|
||||
|
||||
```js
|
||||
const mp = new Minipass({ objectMode: true })
|
||||
mp.write('a')
|
||||
mp.write('b')
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // a, b
|
||||
}
|
||||
mp.write('c')
|
||||
mp.write('d')
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // c, d
|
||||
}
|
||||
mp.write('e')
|
||||
mp.end()
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // e
|
||||
}
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // nothing
|
||||
}
|
||||
```
|
||||
|
||||
Asynchronous iteration will continue until the end event is reached,
|
||||
consuming all of the data.
|
||||
|
||||
```js
|
||||
const mp = new Minipass({ encoding: 'utf8' })
|
||||
|
||||
// some source of some data
|
||||
let i = 5
|
||||
const inter = setInterval(() => {
|
||||
if (i-- > 0)
|
||||
mp.write(Buffer.from('foo\n', 'utf8'))
|
||||
else {
|
||||
mp.end()
|
||||
clearInterval(inter)
|
||||
}
|
||||
}, 100)
|
||||
|
||||
// consume the data with asynchronous iteration
|
||||
async function consume () {
|
||||
for await (let chunk of mp) {
|
||||
console.log(chunk)
|
||||
}
|
||||
return 'ok'
|
||||
}
|
||||
|
||||
consume().then(res => console.log(res))
|
||||
// logs `foo\n` 5 times, and then `ok`
|
||||
```
|
||||
|
||||
### subclass that `console.log()`s everything written into it
|
||||
|
||||
```js
|
||||
class Logger extends Minipass {
|
||||
write (chunk, encoding, callback) {
|
||||
console.log('WRITE', chunk, encoding)
|
||||
return super.write(chunk, encoding, callback)
|
||||
}
|
||||
end (chunk, encoding, callback) {
|
||||
console.log('END', chunk, encoding)
|
||||
return super.end(chunk, encoding, callback)
|
||||
}
|
||||
}
|
||||
|
||||
someSource.pipe(new Logger()).pipe(someDest)
|
||||
```
|
||||
|
||||
### same thing, but using an inline anonymous class
|
||||
|
||||
```js
|
||||
// js classes are fun
|
||||
someSource
|
||||
.pipe(new (class extends Minipass {
|
||||
emit (ev, ...data) {
|
||||
// let's also log events, because debugging some weird thing
|
||||
console.log('EMIT', ev)
|
||||
return super.emit(ev, ...data)
|
||||
}
|
||||
write (chunk, encoding, callback) {
|
||||
console.log('WRITE', chunk, encoding)
|
||||
return super.write(chunk, encoding, callback)
|
||||
}
|
||||
end (chunk, encoding, callback) {
|
||||
console.log('END', chunk, encoding)
|
||||
return super.end(chunk, encoding, callback)
|
||||
}
|
||||
}))
|
||||
.pipe(someDest)
|
||||
```
|
||||
|
||||
### subclass that defers 'end' for some reason
|
||||
|
||||
```js
|
||||
class SlowEnd extends Minipass {
|
||||
emit (ev, ...args) {
|
||||
if (ev === 'end') {
|
||||
console.log('going to end, hold on a sec')
|
||||
setTimeout(() => {
|
||||
console.log('ok, ready to end now')
|
||||
super.emit('end', ...args)
|
||||
}, 100)
|
||||
} else {
|
||||
return super.emit(ev, ...args)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### transform that creates newline-delimited JSON
|
||||
|
||||
```js
|
||||
class NDJSONEncode extends Minipass {
|
||||
write (obj, cb) {
|
||||
try {
|
||||
// JSON.stringify can throw, emit an error on that
|
||||
return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
|
||||
} catch (er) {
|
||||
this.emit('error', er)
|
||||
}
|
||||
}
|
||||
end (obj, cb) {
|
||||
if (typeof obj === 'function') {
|
||||
cb = obj
|
||||
obj = undefined
|
||||
}
|
||||
if (obj !== undefined) {
|
||||
this.write(obj)
|
||||
}
|
||||
return super.end(cb)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### transform that parses newline-delimited JSON
|
||||
|
||||
```js
|
||||
class NDJSONDecode extends Minipass {
|
||||
constructor (options) {
|
||||
// always be in object mode, as far as Minipass is concerned
|
||||
super({ objectMode: true })
|
||||
this._jsonBuffer = ''
|
||||
}
|
||||
write (chunk, encoding, cb) {
|
||||
if (typeof chunk === 'string' &&
|
||||
typeof encoding === 'string' &&
|
||||
encoding !== 'utf8') {
|
||||
chunk = Buffer.from(chunk, encoding).toString()
|
||||
    } else if (Buffer.isBuffer(chunk)) {
|
||||
chunk = chunk.toString()
|
||||
}
|
||||
if (typeof encoding === 'function') {
|
||||
cb = encoding
|
||||
}
|
||||
const jsonData = (this._jsonBuffer + chunk).split('\n')
|
||||
this._jsonBuffer = jsonData.pop()
|
||||
for (let i = 0; i < jsonData.length; i++) {
|
||||
try {
|
||||
// JSON.parse can throw, emit an error on that
|
||||
super.write(JSON.parse(jsonData[i]))
|
||||
} catch (er) {
|
||||
this.emit('error', er)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if (cb)
|
||||
cb()
|
||||
}
|
||||
}
|
||||
```
|
155
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/index.d.ts
generated
vendored
Normal file
155
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/index.d.ts
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from 'events'
|
||||
import { Stream } from 'stream'
|
||||
|
||||
declare namespace Minipass {
  // 'buffer' and null both mean "emit raw Buffers, do not decode"
  type Encoding = BufferEncoding | 'buffer' | null

  // Minimal duck-typed writable: anything with end() and write()
  interface Writable extends EventEmitter {
    end(): any
    write(chunk: any, ...args: any[]): any
  }

  // Minimal duck-typed readable: anything that can pause/resume/pipe
  interface Readable extends EventEmitter {
    pause(): any
    resume(): any
    pipe(): any
  }

  // Record of one active pipe: the source, destination, and its options
  interface Pipe<R, W> {
    src: Minipass<R, W>
    dest: Writable
    opts: PipeOptions
  }

  type DualIterable<T> = Iterable<T> & AsyncIterable<T>

  type ContiguousData = Buffer | ArrayBufferLike | ArrayBufferView | string

  type BufferOrString = Buffer | string

  // Constructor options when the stream emits decoded strings
  interface StringOptions {
    encoding: BufferEncoding
    objectMode?: boolean
    async?: boolean
  }

  // Constructor options when the stream emits raw Buffers
  interface BufferOptions {
    encoding?: null | 'buffer'
    objectMode?: boolean
    async?: boolean
  }

  // Constructor options when the stream emits arbitrary objects
  interface ObjectModeOptions {
    objectMode: true
    async?: boolean
  }

  interface PipeOptions {
    end?: boolean
    proxyErrors?: boolean
  }

  // Select the appropriate options shape from the stream's read type
  type Options<T> = T extends string
    ? StringOptions
    : T extends Buffer
    ? BufferOptions
    : ObjectModeOptions
}
|
||||
|
||||
/**
 * Type declarations for the Minipass pass-through stream.
 * RType is the type of data emitted; WType is the type accepted by
 * write() (contiguous data for Buffer/string streams, RType otherwise).
 */
declare class Minipass<
  RType extends any = Buffer,
  WType extends any = RType extends Minipass.BufferOrString
    ? Minipass.ContiguousData
    : RType
>
  extends Stream
  implements Minipass.DualIterable<RType>
{
  static isStream(stream: any): stream is Minipass.Readable | Minipass.Writable

  readonly bufferLength: number
  readonly flowing: boolean
  readonly writable: boolean
  readonly readable: boolean
  readonly paused: boolean
  readonly emittedEnd: boolean
  readonly destroyed: boolean

  /**
   * Not technically private or readonly, but not safe to mutate.
   */
  private readonly buffer: RType[]
  private readonly pipes: Minipass.Pipe<RType, WType>[]

  /**
   * Technically writable, but mutating it can change the type,
   * so is not safe to do in TypeScript.
   */
  readonly objectMode: boolean
  async: boolean

  /**
   * Note: encoding is not actually read-only, and setEncoding(enc)
   * exists. However, this type definition will insist that TypeScript
   * programs declare the type of a Minipass stream up front, and if
   * that type is string, then an encoding MUST be set in the ctor. If
   * the type is Buffer, then the encoding must be missing, or set to
   * 'buffer' or null. If the type is anything else, then objectMode
   * must be set in the constructor options. So there is effectively
   * no allowed way that a TS program can set the encoding after
   * construction, as doing so will destroy any hope of type safety.
   * TypeScript does not provide many options for changing the type of
   * an object at run-time, which is what changing the encoding does.
   */
  readonly encoding: Minipass.Encoding
  // setEncoding(encoding: Encoding): void

  // Options required if not reading buffers
  constructor(
    ...args: RType extends Buffer
      ? [] | [Minipass.Options<RType>]
      : [Minipass.Options<RType>]
  )

  write(chunk: WType, cb?: () => void): boolean
  write(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): boolean
  read(size?: number): RType
  end(cb?: () => void): this
  end(chunk: any, cb?: () => void): this
  end(chunk: any, encoding?: Minipass.Encoding, cb?: () => void): this
  pause(): void
  resume(): void
  promise(): Promise<void>
  collect(): Promise<RType[]>

  concat(): RType extends Minipass.BufferOrString ? Promise<RType> : never
  destroy(er?: any): void
  pipe<W extends Minipass.Writable>(dest: W, opts?: Minipass.PipeOptions): W
  unpipe<W extends Minipass.Writable>(dest: W): void

  /**
   * alias for on()
   */
  addEventHandler(event: string, listener: (...args: any[]) => any): this

  on(event: string, listener: (...args: any[]) => any): this
  on(event: 'data', listener: (chunk: RType) => any): this
  on(event: 'error', listener: (error: any) => any): this
  on(
    event:
      | 'readable'
      | 'drain'
      | 'resume'
      | 'end'
      | 'prefinish'
      | 'finish'
      | 'close',
    listener: () => any
  ): this

  [Symbol.iterator](): Iterator<RType>
  [Symbol.asyncIterator](): AsyncIterator<RType>
}
|
||||
|
||||
export = Minipass
|
649
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/index.js
generated
vendored
Normal file
649
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/index.js
generated
vendored
Normal file
@ -0,0 +1,649 @@
|
||||
'use strict'
|
||||
const proc = typeof process === 'object' && process ? process : {
|
||||
stdout: null,
|
||||
stderr: null,
|
||||
}
|
||||
const EE = require('events')
|
||||
const Stream = require('stream')
|
||||
const SD = require('string_decoder').StringDecoder
|
||||
|
||||
// Internal state keys.  Symbols keep implementation details off of the
// public (string-keyed) property namespace of the stream.
const EOF = Symbol('EOF')
const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
const EMITTED_END = Symbol('emittedEnd')
const EMITTING_END = Symbol('emittingEnd')
const EMITTED_ERROR = Symbol('emittedError')
const CLOSED = Symbol('closed')
const READ = Symbol('read')
const FLUSH = Symbol('flush')
const FLUSHCHUNK = Symbol('flushChunk')
const ENCODING = Symbol('encoding')
const DECODER = Symbol('decoder')
const FLOWING = Symbol('flowing')
const PAUSED = Symbol('paused')
const RESUME = Symbol('resume')
const BUFFERLENGTH = Symbol('bufferLength')
const BUFFERPUSH = Symbol('bufferPush')
const BUFFERSHIFT = Symbol('bufferShift')
const OBJECTMODE = Symbol('objectMode')
const DESTROYED = Symbol('destroyed')
const EMITDATA = Symbol('emitData')
const EMITEND = Symbol('emitEnd')
const EMITEND2 = Symbol('emitEnd2')
const ASYNC = Symbol('async')

// run fn on the next microtask turn (used for async-mode deferral)
const defer = fn => Promise.resolve().then(fn)

// TODO remove when Node v8 support drops
const doIter = global._MP_NO_ITERATOR_SYMBOLS_ !== '1'
const ASYNCITERATOR = doIter && Symbol.asyncIterator
  || Symbol('asyncIterator not implemented')
const ITERATOR = doIter && Symbol.iterator
  || Symbol('iterator not implemented')

// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
const isEndish = ev =>
  ev === 'end' ||
  ev === 'finish' ||
  ev === 'prefinish'

// true for ArrayBuffers, including ones from other realms (matched by
// constructor name rather than instanceof)
const isArrayBuffer = b => b instanceof ArrayBuffer ||
  typeof b === 'object' &&
  b.constructor &&
  b.constructor.name === 'ArrayBuffer' &&
  b.byteLength >= 0

// typed-array views, excluding Buffers (which are handled natively)
const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
|
||||
|
||||
// Bookkeeping for one pipe destination attached to a Minipass source.
// Resumes the source whenever the destination emits 'drain'.
class Pipe {
  constructor (src, dest, opts) {
    this.src = src
    this.dest = dest
    this.opts = opts
    const resumeSource = () => src[RESUME]()
    this.ondrain = resumeSource
    dest.on('drain', resumeSource)
  }

  // detach the drain listener from the destination
  unpipe () {
    const { dest, ondrain } = this
    dest.removeListener('drain', ondrain)
  }

  // istanbul ignore next - only here for the prototype
  proxyErrors () {}

  // source ended: tear down, and end the destination if requested
  end () {
    this.unpipe()
    if (this.opts.end) {
      this.dest.end()
    }
  }
}
|
||||
|
||||
// Pipe variant that additionally forwards 'error' events from the
// source stream to the destination, until the pipe is torn down.
class PipeProxyErrors extends Pipe {
  constructor (src, dest, opts) {
    super(src, dest, opts)
    const forward = er => dest.emit('error', er)
    this.proxyErrors = forward
    src.on('error', forward)
  }

  unpipe () {
    // stop forwarding errors before the base-class teardown
    const { src, proxyErrors } = this
    src.removeListener('error', proxyErrors)
    super.unpipe()
  }
}
|
||||
|
||||
module.exports = class Minipass extends Stream {
|
||||
  // Initialize stream state.  Options: objectMode, encoding, async.
  constructor (options) {
    super()
    // not flowing until a 'data' listener, pipe(), or resume()
    this[FLOWING] = false
    // whether we're explicitly paused
    this[PAUSED] = false
    // active pipe destinations
    this.pipes = []
    // chunks written but not yet emitted
    this.buffer = []
    this[OBJECTMODE] = options && options.objectMode || false
    if (this[OBJECTMODE])
      this[ENCODING] = null
    else
      this[ENCODING] = options && options.encoding || null
    // 'buffer' is an accepted alias for "no encoding"
    if (this[ENCODING] === 'buffer')
      this[ENCODING] = null
    // async mode defers data/end emission to the next tick
    this[ASYNC] = options && !!options.async || false
    this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
    this[EOF] = false
    this[EMITTED_END] = false
    this[EMITTING_END] = false
    this[CLOSED] = false
    this[EMITTED_ERROR] = null
    this.writable = true
    this.readable = true
    // byte count (or object count in objectMode) of buffered data
    this[BUFFERLENGTH] = 0
    this[DESTROYED] = false
  }
|
||||
|
||||
  // total bytes (or objects, in objectMode) currently buffered
  get bufferLength () { return this[BUFFERLENGTH] }

  get encoding () { return this[ENCODING] }
  // Setting the encoding re-decodes any buffered chunks.  Throws in
  // objectMode, or when changing encodings after partial decoding or
  // buffering has occurred.
  set encoding (enc) {
    if (this[OBJECTMODE])
      throw new Error('cannot set encoding in objectMode')

    // refuse to switch encodings once data is buffered, or when the
    // decoder is holding a partial multi-byte character
    if (this[ENCODING] && enc !== this[ENCODING] &&
        (this[DECODER] && this[DECODER].lastNeed || this[BUFFERLENGTH]))
      throw new Error('cannot change encoding')

    if (this[ENCODING] !== enc) {
      this[DECODER] = enc ? new SD(enc) : null
      // re-decode anything already buffered with the new decoder
      if (this.buffer.length)
        this.buffer = this.buffer.map(chunk => this[DECODER].write(chunk))
    }

    this[ENCODING] = enc
  }

  // conventional method form of the encoding setter above
  setEncoding (enc) {
    this.encoding = enc
  }

  get objectMode () { return this[OBJECTMODE] }
  // objectMode is sticky: once true, it can never be unset
  set objectMode (om) { this[OBJECTMODE] = this[OBJECTMODE] || !!om }

  get ['async'] () { return this[ASYNC] }
  // async mode is sticky: once deferred, always deferred
  set ['async'] (a) { this[ASYNC] = this[ASYNC] || !!a }
|
||||
|
||||
  // Write a chunk into the stream.  Returns true while flowing, false
  // when the caller should wait for 'drain'.  encoding applies to
  // string chunks; cb fires once the chunk is handled (on next tick in
  // async mode).
  write (chunk, encoding, cb) {
    // writing after end() is a hard error
    if (this[EOF])
      throw new Error('write after end')

    // a destroyed stream emits (not throws) an error, matching the
    // node-core ERR_STREAM_DESTROYED behavior
    if (this[DESTROYED]) {
      this.emit('error', Object.assign(
        new Error('Cannot call write after a stream was destroyed'),
        { code: 'ERR_STREAM_DESTROYED' }
      ))
      return true
    }

    // support the write(chunk, cb) call form
    if (typeof encoding === 'function')
      cb = encoding, encoding = 'utf8'

    if (!encoding)
      encoding = 'utf8'

    // callback invoker: microtask-deferred in async mode, sync otherwise
    const fn = this[ASYNC] ? defer : f => f()

    // convert array buffers and typed array views into buffers
    // at some point in the future, we may want to do the opposite!
    // leave strings and buffers as-is
    // anything else switches us into object mode
    if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
      if (isArrayBufferView(chunk))
        chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
      else if (isArrayBuffer(chunk))
        chunk = Buffer.from(chunk)
      else if (typeof chunk !== 'string')
        // use the setter so we throw if we have encoding set
        this.objectMode = true
    }

    // handle object mode up front, since it's simpler
    // this yields better performance, fewer checks later.
    if (this[OBJECTMODE]) {
      /* istanbul ignore if - maybe impossible? */
      if (this.flowing && this[BUFFERLENGTH] !== 0)
        this[FLUSH](true)

      if (this.flowing)
        this.emit('data', chunk)
      else
        this[BUFFERPUSH](chunk)

      if (this[BUFFERLENGTH] !== 0)
        this.emit('readable')

      if (cb)
        fn(cb)

      return this.flowing
    }

    // at this point the chunk is a buffer or string
    // don't buffer it up or send it to the decoder
    if (!chunk.length) {
      if (this[BUFFERLENGTH] !== 0)
        this.emit('readable')
      if (cb)
        fn(cb)
      return this.flowing
    }

    // fast-path writing strings of same encoding to a stream with
    // an empty buffer, skipping the buffer/decoder dance
    if (typeof chunk === 'string' &&
      // unless it is a string already ready for us to use
      !(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
      chunk = Buffer.from(chunk, encoding)
    }

    if (Buffer.isBuffer(chunk) && this[ENCODING])
      chunk = this[DECODER].write(chunk)

    // Note: flushing CAN potentially switch us into not-flowing mode
    if (this.flowing && this[BUFFERLENGTH] !== 0)
      this[FLUSH](true)

    if (this.flowing)
      this.emit('data', chunk)
    else
      this[BUFFERPUSH](chunk)

    if (this[BUFFERLENGTH] !== 0)
      this.emit('readable')

    if (cb)
      fn(cb)

    return this.flowing
  }
|
||||
|
||||
read (n) {
|
||||
if (this[DESTROYED])
|
||||
return null
|
||||
|
||||
if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH]) {
|
||||
this[MAYBE_EMIT_END]()
|
||||
return null
|
||||
}
|
||||
|
||||
if (this[OBJECTMODE])
|
||||
n = null
|
||||
|
||||
if (this.buffer.length > 1 && !this[OBJECTMODE]) {
|
||||
if (this.encoding)
|
||||
this.buffer = [this.buffer.join('')]
|
||||
else
|
||||
this.buffer = [Buffer.concat(this.buffer, this[BUFFERLENGTH])]
|
||||
}
|
||||
|
||||
const ret = this[READ](n || null, this.buffer[0])
|
||||
this[MAYBE_EMIT_END]()
|
||||
return ret
|
||||
}
|
||||
|
||||
[READ] (n, chunk) {
|
||||
if (n === chunk.length || n === null)
|
||||
this[BUFFERSHIFT]()
|
||||
else {
|
||||
this.buffer[0] = chunk.slice(n)
|
||||
chunk = chunk.slice(0, n)
|
||||
this[BUFFERLENGTH] -= n
|
||||
}
|
||||
|
||||
this.emit('data', chunk)
|
||||
|
||||
if (!this.buffer.length && !this[EOF])
|
||||
this.emit('drain')
|
||||
|
||||
return chunk
|
||||
}
|
||||
|
||||
end (chunk, encoding, cb) {
|
||||
if (typeof chunk === 'function')
|
||||
cb = chunk, chunk = null
|
||||
if (typeof encoding === 'function')
|
||||
cb = encoding, encoding = 'utf8'
|
||||
if (chunk)
|
||||
this.write(chunk, encoding)
|
||||
if (cb)
|
||||
this.once('end', cb)
|
||||
this[EOF] = true
|
||||
this.writable = false
|
||||
|
||||
// if we haven't written anything, then go ahead and emit,
|
||||
// even if we're not reading.
|
||||
// we'll re-emit if a new 'end' listener is added anyway.
|
||||
// This makes MP more suitable to write-only use cases.
|
||||
if (this.flowing || !this[PAUSED])
|
||||
this[MAYBE_EMIT_END]()
|
||||
return this
|
||||
}
|
||||
|
||||
// don't let the internal resume be overwritten
|
||||
[RESUME] () {
|
||||
if (this[DESTROYED])
|
||||
return
|
||||
|
||||
this[PAUSED] = false
|
||||
this[FLOWING] = true
|
||||
this.emit('resume')
|
||||
if (this.buffer.length)
|
||||
this[FLUSH]()
|
||||
else if (this[EOF])
|
||||
this[MAYBE_EMIT_END]()
|
||||
else
|
||||
this.emit('drain')
|
||||
}
|
||||
|
||||
resume () {
|
||||
return this[RESUME]()
|
||||
}
|
||||
|
||||
pause () {
|
||||
this[FLOWING] = false
|
||||
this[PAUSED] = true
|
||||
}
|
||||
|
||||
get destroyed () {
|
||||
return this[DESTROYED]
|
||||
}
|
||||
|
||||
get flowing () {
|
||||
return this[FLOWING]
|
||||
}
|
||||
|
||||
get paused () {
|
||||
return this[PAUSED]
|
||||
}
|
||||
|
||||
[BUFFERPUSH] (chunk) {
|
||||
if (this[OBJECTMODE])
|
||||
this[BUFFERLENGTH] += 1
|
||||
else
|
||||
this[BUFFERLENGTH] += chunk.length
|
||||
this.buffer.push(chunk)
|
||||
}
|
||||
|
||||
[BUFFERSHIFT] () {
|
||||
if (this.buffer.length) {
|
||||
if (this[OBJECTMODE])
|
||||
this[BUFFERLENGTH] -= 1
|
||||
else
|
||||
this[BUFFERLENGTH] -= this.buffer[0].length
|
||||
}
|
||||
return this.buffer.shift()
|
||||
}
|
||||
|
||||
[FLUSH] (noDrain) {
|
||||
do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()))
|
||||
|
||||
if (!noDrain && !this.buffer.length && !this[EOF])
|
||||
this.emit('drain')
|
||||
}
|
||||
|
||||
[FLUSHCHUNK] (chunk) {
|
||||
return chunk ? (this.emit('data', chunk), this.flowing) : false
|
||||
}
|
||||
|
||||
pipe (dest, opts) {
|
||||
if (this[DESTROYED])
|
||||
return
|
||||
|
||||
const ended = this[EMITTED_END]
|
||||
opts = opts || {}
|
||||
if (dest === proc.stdout || dest === proc.stderr)
|
||||
opts.end = false
|
||||
else
|
||||
opts.end = opts.end !== false
|
||||
opts.proxyErrors = !!opts.proxyErrors
|
||||
|
||||
// piping an ended stream ends immediately
|
||||
if (ended) {
|
||||
if (opts.end)
|
||||
dest.end()
|
||||
} else {
|
||||
this.pipes.push(!opts.proxyErrors ? new Pipe(this, dest, opts)
|
||||
: new PipeProxyErrors(this, dest, opts))
|
||||
if (this[ASYNC])
|
||||
defer(() => this[RESUME]())
|
||||
else
|
||||
this[RESUME]()
|
||||
}
|
||||
|
||||
return dest
|
||||
}
|
||||
|
||||
unpipe (dest) {
|
||||
const p = this.pipes.find(p => p.dest === dest)
|
||||
if (p) {
|
||||
this.pipes.splice(this.pipes.indexOf(p), 1)
|
||||
p.unpipe()
|
||||
}
|
||||
}
|
||||
|
||||
addListener (ev, fn) {
|
||||
return this.on(ev, fn)
|
||||
}
|
||||
|
||||
on (ev, fn) {
|
||||
const ret = super.on(ev, fn)
|
||||
if (ev === 'data' && !this.pipes.length && !this.flowing)
|
||||
this[RESUME]()
|
||||
else if (ev === 'readable' && this[BUFFERLENGTH] !== 0)
|
||||
super.emit('readable')
|
||||
else if (isEndish(ev) && this[EMITTED_END]) {
|
||||
super.emit(ev)
|
||||
this.removeAllListeners(ev)
|
||||
} else if (ev === 'error' && this[EMITTED_ERROR]) {
|
||||
if (this[ASYNC])
|
||||
defer(() => fn.call(this, this[EMITTED_ERROR]))
|
||||
else
|
||||
fn.call(this, this[EMITTED_ERROR])
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
get emittedEnd () {
|
||||
return this[EMITTED_END]
|
||||
}
|
||||
|
||||
[MAYBE_EMIT_END] () {
|
||||
if (!this[EMITTING_END] &&
|
||||
!this[EMITTED_END] &&
|
||||
!this[DESTROYED] &&
|
||||
this.buffer.length === 0 &&
|
||||
this[EOF]) {
|
||||
this[EMITTING_END] = true
|
||||
this.emit('end')
|
||||
this.emit('prefinish')
|
||||
this.emit('finish')
|
||||
if (this[CLOSED])
|
||||
this.emit('close')
|
||||
this[EMITTING_END] = false
|
||||
}
|
||||
}
|
||||
|
||||
emit (ev, data, ...extra) {
|
||||
// error and close are only events allowed after calling destroy()
|
||||
if (ev !== 'error' && ev !== 'close' && ev !== DESTROYED && this[DESTROYED])
|
||||
return
|
||||
else if (ev === 'data') {
|
||||
return !data ? false
|
||||
: this[ASYNC] ? defer(() => this[EMITDATA](data))
|
||||
: this[EMITDATA](data)
|
||||
} else if (ev === 'end') {
|
||||
return this[EMITEND]()
|
||||
} else if (ev === 'close') {
|
||||
this[CLOSED] = true
|
||||
// don't emit close before 'end' and 'finish'
|
||||
if (!this[EMITTED_END] && !this[DESTROYED])
|
||||
return
|
||||
const ret = super.emit('close')
|
||||
this.removeAllListeners('close')
|
||||
return ret
|
||||
} else if (ev === 'error') {
|
||||
this[EMITTED_ERROR] = data
|
||||
const ret = super.emit('error', data)
|
||||
this[MAYBE_EMIT_END]()
|
||||
return ret
|
||||
} else if (ev === 'resume') {
|
||||
const ret = super.emit('resume')
|
||||
this[MAYBE_EMIT_END]()
|
||||
return ret
|
||||
} else if (ev === 'finish' || ev === 'prefinish') {
|
||||
const ret = super.emit(ev)
|
||||
this.removeAllListeners(ev)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Some other unknown event
|
||||
const ret = super.emit(ev, data, ...extra)
|
||||
this[MAYBE_EMIT_END]()
|
||||
return ret
|
||||
}
|
||||
|
||||
[EMITDATA] (data) {
|
||||
for (const p of this.pipes) {
|
||||
if (p.dest.write(data) === false)
|
||||
this.pause()
|
||||
}
|
||||
const ret = super.emit('data', data)
|
||||
this[MAYBE_EMIT_END]()
|
||||
return ret
|
||||
}
|
||||
|
||||
[EMITEND] () {
|
||||
if (this[EMITTED_END])
|
||||
return
|
||||
|
||||
this[EMITTED_END] = true
|
||||
this.readable = false
|
||||
if (this[ASYNC])
|
||||
defer(() => this[EMITEND2]())
|
||||
else
|
||||
this[EMITEND2]()
|
||||
}
|
||||
|
||||
[EMITEND2] () {
|
||||
if (this[DECODER]) {
|
||||
const data = this[DECODER].end()
|
||||
if (data) {
|
||||
for (const p of this.pipes) {
|
||||
p.dest.write(data)
|
||||
}
|
||||
super.emit('data', data)
|
||||
}
|
||||
}
|
||||
|
||||
for (const p of this.pipes) {
|
||||
p.end()
|
||||
}
|
||||
const ret = super.emit('end')
|
||||
this.removeAllListeners('end')
|
||||
return ret
|
||||
}
|
||||
|
||||
// const all = await stream.collect()
|
||||
collect () {
|
||||
const buf = []
|
||||
if (!this[OBJECTMODE])
|
||||
buf.dataLength = 0
|
||||
// set the promise first, in case an error is raised
|
||||
// by triggering the flow here.
|
||||
const p = this.promise()
|
||||
this.on('data', c => {
|
||||
buf.push(c)
|
||||
if (!this[OBJECTMODE])
|
||||
buf.dataLength += c.length
|
||||
})
|
||||
return p.then(() => buf)
|
||||
}
|
||||
|
||||
// const data = await stream.concat()
|
||||
concat () {
|
||||
return this[OBJECTMODE]
|
||||
? Promise.reject(new Error('cannot concat in objectMode'))
|
||||
: this.collect().then(buf =>
|
||||
this[OBJECTMODE]
|
||||
? Promise.reject(new Error('cannot concat in objectMode'))
|
||||
: this[ENCODING] ? buf.join('') : Buffer.concat(buf, buf.dataLength))
|
||||
}
|
||||
|
||||
// stream.promise().then(() => done, er => emitted error)
|
||||
promise () {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.on(DESTROYED, () => reject(new Error('stream destroyed')))
|
||||
this.on('error', er => reject(er))
|
||||
this.on('end', () => resolve())
|
||||
})
|
||||
}
|
||||
|
||||
// for await (let chunk of stream)
|
||||
[ASYNCITERATOR] () {
|
||||
const next = () => {
|
||||
const res = this.read()
|
||||
if (res !== null)
|
||||
return Promise.resolve({ done: false, value: res })
|
||||
|
||||
if (this[EOF])
|
||||
return Promise.resolve({ done: true })
|
||||
|
||||
let resolve = null
|
||||
let reject = null
|
||||
const onerr = er => {
|
||||
this.removeListener('data', ondata)
|
||||
this.removeListener('end', onend)
|
||||
reject(er)
|
||||
}
|
||||
const ondata = value => {
|
||||
this.removeListener('error', onerr)
|
||||
this.removeListener('end', onend)
|
||||
this.pause()
|
||||
resolve({ value: value, done: !!this[EOF] })
|
||||
}
|
||||
const onend = () => {
|
||||
this.removeListener('error', onerr)
|
||||
this.removeListener('data', ondata)
|
||||
resolve({ done: true })
|
||||
}
|
||||
const ondestroy = () => onerr(new Error('stream destroyed'))
|
||||
return new Promise((res, rej) => {
|
||||
reject = rej
|
||||
resolve = res
|
||||
this.once(DESTROYED, ondestroy)
|
||||
this.once('error', onerr)
|
||||
this.once('end', onend)
|
||||
this.once('data', ondata)
|
||||
})
|
||||
}
|
||||
|
||||
return { next }
|
||||
}
|
||||
|
||||
// for (let chunk of stream)
|
||||
[ITERATOR] () {
|
||||
const next = () => {
|
||||
const value = this.read()
|
||||
const done = value === null
|
||||
return { value, done }
|
||||
}
|
||||
return { next }
|
||||
}
|
||||
|
||||
destroy (er) {
|
||||
if (this[DESTROYED]) {
|
||||
if (er)
|
||||
this.emit('error', er)
|
||||
else
|
||||
this.emit(DESTROYED)
|
||||
return this
|
||||
}
|
||||
|
||||
this[DESTROYED] = true
|
||||
|
||||
// throw away all buffered data, it's never coming out
|
||||
this.buffer.length = 0
|
||||
this[BUFFERLENGTH] = 0
|
||||
|
||||
if (typeof this.close === 'function' && !this[CLOSED])
|
||||
this.close()
|
||||
|
||||
if (er)
|
||||
this.emit('error', er)
|
||||
else // if no error to emit, still reject pending promises
|
||||
this.emit(DESTROYED)
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
static isStream (s) {
|
||||
return !!s && (s instanceof Minipass || s instanceof Stream ||
|
||||
s instanceof EE && (
|
||||
typeof s.pipe === 'function' || // readable
|
||||
(typeof s.write === 'function' && typeof s.end === 'function') // writable
|
||||
))
|
||||
}
|
||||
}
|
56
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/package.json
generated
vendored
Normal file
56
backend/apis/nodejs/node_modules/minizlib/node_modules/minipass/package.json
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
{
|
||||
"name": "minipass",
|
||||
"version": "3.3.6",
|
||||
"description": "minimal implementation of a PassThrough stream",
|
||||
"main": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"dependencies": {
|
||||
"yallist": "^4.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^17.0.41",
|
||||
"end-of-stream": "^1.4.0",
|
||||
"prettier": "^2.6.2",
|
||||
"tap": "^16.2.0",
|
||||
"through2": "^2.0.3",
|
||||
"ts-node": "^10.8.1",
|
||||
"typescript": "^4.7.3"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "tap",
|
||||
"preversion": "npm test",
|
||||
"postversion": "npm publish",
|
||||
"postpublish": "git push origin --follow-tags"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/isaacs/minipass.git"
|
||||
},
|
||||
"keywords": [
|
||||
"passthrough",
|
||||
"stream"
|
||||
],
|
||||
"author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
|
||||
"license": "ISC",
|
||||
"files": [
|
||||
"index.d.ts",
|
||||
"index.js"
|
||||
],
|
||||
"tap": {
|
||||
"check-coverage": true
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"prettier": {
|
||||
"semi": false,
|
||||
"printWidth": 80,
|
||||
"tabWidth": 2,
|
||||
"useTabs": false,
|
||||
"singleQuote": true,
|
||||
"jsxSingleQuote": false,
|
||||
"bracketSameLine": true,
|
||||
"arrowParens": "avoid",
|
||||
"endOfLine": "lf"
|
||||
}
|
||||
}
|
42
backend/apis/nodejs/node_modules/minizlib/package.json
generated
vendored
Normal file
42
backend/apis/nodejs/node_modules/minizlib/package.json
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
{
|
||||
"name": "minizlib",
|
||||
"version": "2.1.2",
|
||||
"description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
|
||||
"main": "index.js",
|
||||
"dependencies": {
|
||||
"minipass": "^3.0.0",
|
||||
"yallist": "^4.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "tap test/*.js --100 -J",
|
||||
"preversion": "npm test",
|
||||
"postversion": "npm publish",
|
||||
"postpublish": "git push origin --all; git push origin --tags"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/isaacs/minizlib.git"
|
||||
},
|
||||
"keywords": [
|
||||
"zlib",
|
||||
"gzip",
|
||||
"gunzip",
|
||||
"deflate",
|
||||
"inflate",
|
||||
"compression",
|
||||
"zip",
|
||||
"unzip"
|
||||
],
|
||||
"author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"tap": "^14.6.9"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"constants.js"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">= 8"
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user