Change endpoint from persons to people

This commit is contained in:
xfarrow
2025-03-23 21:00:08 +01:00
parent 4ae263662c
commit d005193f63
7158 changed files with 700476 additions and 735 deletions

backend/apis/nodejs/node_modules/tar/lib/create.js generated vendored Normal file

@@ -0,0 +1,111 @@
'use strict'
// tar -c
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')
module.exports = (opt_, files, cb) => {
if (typeof files === 'function') {
cb = files
}
if (Array.isArray(opt_)) {
files = opt_, opt_ = {}
}
if (!files || !Array.isArray(files) || !files.length) {
throw new TypeError('no files or directories specified')
}
files = Array.from(files)
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function') {
throw new TypeError('callback not supported for sync tar functions')
}
if (!opt.file && typeof cb === 'function') {
throw new TypeError('callback only supported with file option')
}
return opt.file && opt.sync ? createFileSync(opt, files)
: opt.file ? createFile(opt, files, cb)
: opt.sync ? createSync(opt, files)
: create(opt, files)
}
const createFileSync = (opt, files) => {
const p = new Pack.Sync(opt)
const stream = new fsm.WriteStreamSync(opt.file, {
mode: opt.mode || 0o666,
})
p.pipe(stream)
addFilesSync(p, files)
}
const createFile = (opt, files, cb) => {
const p = new Pack(opt)
const stream = new fsm.WriteStream(opt.file, {
mode: opt.mode || 0o666,
})
p.pipe(stream)
const promise = new Promise((res, rej) => {
stream.on('error', rej)
stream.on('close', res)
p.on('error', rej)
})
addFilesAsync(p, files)
return cb ? promise.then(cb, cb) : promise
}
const addFilesSync = (p, files) => {
files.forEach(file => {
if (file.charAt(0) === '@') {
t({
file: path.resolve(p.cwd, file.slice(1)),
sync: true,
noResume: true,
onentry: entry => p.add(entry),
})
} else {
p.add(file)
}
})
p.end()
}
const addFilesAsync = (p, files) => {
while (files.length) {
const file = files.shift()
if (file.charAt(0) === '@') {
return t({
file: path.resolve(p.cwd, file.slice(1)),
noResume: true,
onentry: entry => p.add(entry),
}).then(_ => addFilesAsync(p, files))
} else {
p.add(file)
}
}
p.end()
}
const createSync = (opt, files) => {
const p = new Pack.Sync(opt)
addFilesSync(p, files)
return p
}
const create = (opt, files) => {
const p = new Pack(opt)
addFilesAsync(p, files)
return p
}
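
A minimal usage sketch for this module, assuming the package's index re-exports it as `tar.create` (alias `tar.c`); the archive name and input paths are placeholders:

const tar = require('tar')

// with `file` set, the returned promise resolves once the write stream closes
tar.create({ gzip: true, file: 'archive.tgz' }, ['src/'])
  .then(() => console.log('archive written'))

// with `sync` and `file` set, the archive is fully written before returning
tar.create({ sync: true, file: 'archive.tar' }, ['src/'])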

backend/apis/nodejs/node_modules/tar/lib/extract.js generated vendored Normal file

@@ -0,0 +1,113 @@
'use strict'
// tar -x
const hlo = require('./high-level-opt.js')
const Unpack = require('./unpack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const stripSlash = require('./strip-trailing-slashes.js')
module.exports = (opt_, files, cb) => {
if (typeof opt_ === 'function') {
cb = opt_, files = null, opt_ = {}
} else if (Array.isArray(opt_)) {
files = opt_, opt_ = {}
}
if (typeof files === 'function') {
cb = files, files = null
}
if (!files) {
files = []
} else {
files = Array.from(files)
}
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function') {
throw new TypeError('callback not supported for sync tar functions')
}
if (!opt.file && typeof cb === 'function') {
throw new TypeError('callback only supported with file option')
}
if (files.length) {
filesFilter(opt, files)
}
return opt.file && opt.sync ? extractFileSync(opt)
: opt.file ? extractFile(opt, cb)
: opt.sync ? extractSync(opt)
: extract(opt)
}
// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
const map = new Map(files.map(f => [stripSlash(f), true]))
const filter = opt.filter
const mapHas = (file, r) => {
const root = r || path.parse(file).root || '.'
const ret = file === root ? false
: map.has(file) ? map.get(file)
: mapHas(path.dirname(file), root)
map.set(file, ret)
return ret
}
opt.filter = filter
? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
: file => mapHas(stripSlash(file))
}
const extractFileSync = opt => {
const u = new Unpack.Sync(opt)
const file = opt.file
const stat = fs.statSync(file)
// This trades a zero-byte read() syscall for a stat
// However, it will usually result in less memory allocation
const readSize = opt.maxReadSize || 16 * 1024 * 1024
const stream = new fsm.ReadStreamSync(file, {
readSize: readSize,
size: stat.size,
})
stream.pipe(u)
}
const extractFile = (opt, cb) => {
const u = new Unpack(opt)
const readSize = opt.maxReadSize || 16 * 1024 * 1024
const file = opt.file
const p = new Promise((resolve, reject) => {
u.on('error', reject)
u.on('close', resolve)
// This trades a zero-byte read() syscall for a stat
// However, it will usually result in less memory allocation
fs.stat(file, (er, stat) => {
if (er) {
reject(er)
} else {
const stream = new fsm.ReadStream(file, {
readSize: readSize,
size: stat.size,
})
stream.on('error', reject)
stream.pipe(u)
}
})
})
return cb ? p.then(cb, cb) : p
}
const extractSync = opt => new Unpack.Sync(opt)
const extract = opt => new Unpack(opt)
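
An extraction sketch to mirror the above, assuming the package's index exposes this module as `tar.extract` (alias `tar.x`); passing a file list installs the filesFilter above, so naming a directory also pulls in everything beneath it:

const tar = require('tar')

tar.extract({ file: 'archive.tgz', cwd: 'out' }, ['src'])
  .then(() => console.log('src and its children extracted into out/'))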

backend/apis/nodejs/node_modules/tar/lib/get-write-flag.js generated vendored Normal file

@@ -0,0 +1,20 @@
// Get the appropriate flag to use for creating files
// We use fmap on Windows platforms for files less than
// 512kb. This is a fairly low limit, but avoids making
// things slower in some cases. Since most of what this
// library is used for is extracting tarballs of many
// relatively small files in npm packages and the like,
// it can be a big boost on Windows platforms.
// Only supported in Node v12.9.0 and above.
const platform = process.env.__FAKE_PLATFORM__ || process.platform
const isWindows = platform === 'win32'
const fs = global.__FAKE_TESTING_FS__ || require('fs')
/* istanbul ignore next */
const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants
const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
const fMapLimit = 512 * 1024
const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
module.exports = !fMapEnabled ? () => 'w'
: size => size < fMapLimit ? fMapFlag : 'w'
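
What the exported selector returns, shown with a hypothetical local require; outside Windows builds with FILEMAP support, every call yields the plain 'w' flag:

const getWriteFlag = require('./get-write-flag.js')

console.log(getWriteFlag(100 * 1024))  // numeric fMapFlag on supported Windows, 'w' elsewhere
console.log(getWriteFlag(1024 * 1024)) // 'w' everywhere: at or above the 512kb fMapLimit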

backend/apis/nodejs/node_modules/tar/lib/header.js generated vendored Normal file

@@ -0,0 +1,304 @@
'use strict'
// parse a 512-byte header block to a data object, or vice-versa
// encode returns `true` if a pax extended header is needed, because
// the data could not be faithfully encoded in a simple header.
// (Also, check header.needPax to see if it needs a pax header.)
const types = require('./types.js')
const pathModule = require('path').posix
const large = require('./large-numbers.js')
const SLURP = Symbol('slurp')
const TYPE = Symbol('type')
class Header {
constructor (data, off, ex, gex) {
this.cksumValid = false
this.needPax = false
this.nullBlock = false
this.block = null
this.path = null
this.mode = null
this.uid = null
this.gid = null
this.size = null
this.mtime = null
this.cksum = null
this[TYPE] = '0'
this.linkpath = null
this.uname = null
this.gname = null
this.devmaj = 0
this.devmin = 0
this.atime = null
this.ctime = null
if (Buffer.isBuffer(data)) {
this.decode(data, off || 0, ex, gex)
} else if (data) {
this.set(data)
}
}
decode (buf, off, ex, gex) {
if (!off) {
off = 0
}
if (!buf || !(buf.length >= off + 512)) {
throw new Error('need 512 bytes for header')
}
this.path = decString(buf, off, 100)
this.mode = decNumber(buf, off + 100, 8)
this.uid = decNumber(buf, off + 108, 8)
this.gid = decNumber(buf, off + 116, 8)
this.size = decNumber(buf, off + 124, 12)
this.mtime = decDate(buf, off + 136, 12)
this.cksum = decNumber(buf, off + 148, 12)
// if we have extended or global extended headers, apply them now
// See https://github.com/npm/node-tar/pull/187
this[SLURP](ex)
this[SLURP](gex, true)
// old tar versions marked dirs as a file with a trailing /
this[TYPE] = decString(buf, off + 156, 1)
if (this[TYPE] === '') {
this[TYPE] = '0'
}
if (this[TYPE] === '0' && this.path.slice(-1) === '/') {
this[TYPE] = '5'
}
// tar implementations sometimes incorrectly put the stat(dir).size
// as the size in the tarball, even though Directory entries are
// not able to have any body at all. In the very rare chance that
// it actually DOES have a body, we weren't going to do anything with
// it anyway, and it'll just be a warning about an invalid header.
if (this[TYPE] === '5') {
this.size = 0
}
this.linkpath = decString(buf, off + 157, 100)
if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
this.uname = decString(buf, off + 265, 32)
this.gname = decString(buf, off + 297, 32)
this.devmaj = decNumber(buf, off + 329, 8)
this.devmin = decNumber(buf, off + 337, 8)
if (buf[off + 475] !== 0) {
// definitely a prefix, definitely >130 chars.
const prefix = decString(buf, off + 345, 155)
this.path = prefix + '/' + this.path
} else {
const prefix = decString(buf, off + 345, 130)
if (prefix) {
this.path = prefix + '/' + this.path
}
this.atime = decDate(buf, off + 476, 12)
this.ctime = decDate(buf, off + 488, 12)
}
}
let sum = 8 * 0x20
for (let i = off; i < off + 148; i++) {
sum += buf[i]
}
for (let i = off + 156; i < off + 512; i++) {
sum += buf[i]
}
this.cksumValid = sum === this.cksum
if (this.cksum === null && sum === 8 * 0x20) {
this.nullBlock = true
}
}
[SLURP] (ex, global) {
for (const k in ex) {
// we slurp in everything except for the path attribute in
// a global extended header, because that's weird.
if (ex[k] !== null && ex[k] !== undefined &&
!(global && k === 'path')) {
this[k] = ex[k]
}
}
}
encode (buf, off) {
if (!buf) {
buf = this.block = Buffer.alloc(512)
off = 0
}
if (!off) {
off = 0
}
if (!(buf.length >= off + 512)) {
throw new Error('need 512 bytes for header')
}
const prefixSize = this.ctime || this.atime ? 130 : 155
const split = splitPrefix(this.path || '', prefixSize)
const path = split[0]
const prefix = split[1]
this.needPax = split[2]
this.needPax = encString(buf, off, 100, path) || this.needPax
this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
buf[off + 156] = this[TYPE].charCodeAt(0)
this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
buf.write('ustar\u000000', off + 257, 8)
this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
if (buf[off + 475] !== 0) {
this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
} else {
this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
}
let sum = 8 * 0x20
for (let i = off; i < off + 148; i++) {
sum += buf[i]
}
for (let i = off + 156; i < off + 512; i++) {
sum += buf[i]
}
this.cksum = sum
encNumber(buf, off + 148, 8, this.cksum)
this.cksumValid = true
return this.needPax
}
set (data) {
for (const i in data) {
if (data[i] !== null && data[i] !== undefined) {
this[i] = data[i]
}
}
}
get type () {
return types.name.get(this[TYPE]) || this[TYPE]
}
get typeKey () {
return this[TYPE]
}
set type (type) {
if (types.code.has(type)) {
this[TYPE] = types.code.get(type)
} else {
this[TYPE] = type
}
}
}
const splitPrefix = (p, prefixSize) => {
const pathSize = 100
let pp = p
let prefix = ''
let ret
const root = pathModule.parse(p).root || '.'
if (Buffer.byteLength(pp) < pathSize) {
ret = [pp, prefix, false]
} else {
// first set prefix to the dir, and path to the base
prefix = pathModule.dirname(pp)
pp = pathModule.basename(pp)
do {
if (Buffer.byteLength(pp) <= pathSize &&
Buffer.byteLength(prefix) <= prefixSize) {
// both fit!
ret = [pp, prefix, false]
} else if (Buffer.byteLength(pp) > pathSize &&
Buffer.byteLength(prefix) <= prefixSize) {
// prefix fits in prefix, but path doesn't fit in path
ret = [pp.slice(0, pathSize - 1), prefix, true]
} else {
// make path take a bit from prefix
pp = pathModule.join(pathModule.basename(prefix), pp)
prefix = pathModule.dirname(prefix)
}
} while (prefix !== root && !ret)
// at this point, found no resolution, just truncate
if (!ret) {
ret = [p.slice(0, pathSize - 1), '', true]
}
}
return ret
}
const decString = (buf, off, size) =>
buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
const decDate = (buf, off, size) =>
numToDate(decNumber(buf, off, size))
const numToDate = num => num === null ? null : new Date(num * 1000)
const decNumber = (buf, off, size) =>
buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
: decSmallNumber(buf, off, size)
const nanNull = value => isNaN(value) ? null : value
const decSmallNumber = (buf, off, size) =>
nanNull(parseInt(
buf.slice(off, off + size)
.toString('utf8').replace(/\0.*$/, '').trim(), 8))
// the maximum encodable as a null-terminated octal, by field size
const MAXNUM = {
12: 0o77777777777,
8: 0o7777777,
}
const encNumber = (buf, off, size, number) =>
number === null ? false :
number > MAXNUM[size] || number < 0
? (large.encode(number, buf.slice(off, off + size)), true)
: (encSmallNumber(buf, off, size, number), false)
const encSmallNumber = (buf, off, size, number) =>
buf.write(octalString(number, size), off, size, 'ascii')
const octalString = (number, size) =>
padOctal(Math.floor(number).toString(8), size)
const padOctal = (string, size) =>
(string.length === size - 1 ? string
: new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
const encDate = (buf, off, size, date) =>
date === null ? false :
encNumber(buf, off, size, date.getTime() / 1000)
// enough to fill the longest string we've got
const NULLS = new Array(156).join('\0')
// pad with nulls, return true if it's longer or non-ascii
const encString = (buf, off, size, string) =>
string === null ? false :
(buf.write(string + NULLS, off, size, 'utf8'),
string.length !== Buffer.byteLength(string) || string.length > size)
module.exports = Header
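
A round-trip sketch of the Header class, assuming `./types.js` maps the name 'File' to the type code '0' (as the setter above expects):

const Header = require('./header.js')

const h = new Header({ path: 'hello.txt', mode: 0o644, size: 5, mtime: new Date(), type: 'File' })
h.encode() // no buffer given: allocates h.block (512 bytes), returns needPax

const h2 = new Header(h.block) // decode it straight back
console.log(h2.path, h2.size, h2.cksumValid) // 'hello.txt' 5 true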

backend/apis/nodejs/node_modules/tar/lib/high-level-opt.js generated vendored Normal file

@@ -0,0 +1,29 @@
'use strict'
// turn tar(1) style args like `C` into the more verbose things like `cwd`
const argmap = new Map([
['C', 'cwd'],
['f', 'file'],
['z', 'gzip'],
['P', 'preservePaths'],
['U', 'unlink'],
['strip-components', 'strip'],
['stripComponents', 'strip'],
['keep-newer', 'newer'],
['keepNewer', 'newer'],
['keep-newer-files', 'newer'],
['keepNewerFiles', 'newer'],
['k', 'keep'],
['keep-existing', 'keep'],
['keepExisting', 'keep'],
['m', 'noMtime'],
['no-mtime', 'noMtime'],
['p', 'preserveOwner'],
['L', 'follow'],
['h', 'follow'],
])
module.exports = opt => opt ? Object.keys(opt).map(k => [
argmap.has(k) ? argmap.get(k) : k, opt[k],
]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
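
For example, short tar(1)-style keys expand while unrecognized keys pass through unchanged (the require path is hypothetical):

const hlo = require('./high-level-opt.js')

console.log(hlo({ C: '/tmp', z: true, f: 'out.tgz', follow: true }))
// => { cwd: '/tmp', gzip: true, file: 'out.tgz', follow: true } (prototype-less object)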

backend/apis/nodejs/node_modules/tar/lib/large-numbers.js generated vendored Normal file

@@ -0,0 +1,104 @@
'use strict'
// Tar can encode large and negative numbers using a leading byte of
// 0xff for negative, and 0x80 for positive.
const encode = (num, buf) => {
if (!Number.isSafeInteger(num)) {
// The number is so large that javascript cannot represent it with integer
// precision.
throw Error('cannot encode number outside of javascript safe integer range')
} else if (num < 0) {
encodeNegative(num, buf)
} else {
encodePositive(num, buf)
}
return buf
}
const encodePositive = (num, buf) => {
buf[0] = 0x80
for (var i = buf.length; i > 1; i--) {
buf[i - 1] = num & 0xff
num = Math.floor(num / 0x100)
}
}
const encodeNegative = (num, buf) => {
buf[0] = 0xff
var flipped = false
num = num * -1
for (var i = buf.length; i > 1; i--) {
var byte = num & 0xff
num = Math.floor(num / 0x100)
if (flipped) {
buf[i - 1] = onesComp(byte)
} else if (byte === 0) {
buf[i - 1] = 0
} else {
flipped = true
buf[i - 1] = twosComp(byte)
}
}
}
const parse = (buf) => {
const pre = buf[0]
const value = pre === 0x80 ? pos(buf.slice(1, buf.length))
: pre === 0xff ? twos(buf)
: null
if (value === null) {
throw Error('invalid base256 encoding')
}
if (!Number.isSafeInteger(value)) {
// The number is so large that javascript cannot represent it with integer
// precision.
throw Error('parsed number outside of javascript safe integer range')
}
return value
}
const twos = (buf) => {
var len = buf.length
var sum = 0
var flipped = false
for (var i = len - 1; i > -1; i--) {
var byte = buf[i]
var f
if (flipped) {
f = onesComp(byte)
} else if (byte === 0) {
f = byte
} else {
flipped = true
f = twosComp(byte)
}
if (f !== 0) {
sum -= f * Math.pow(256, len - i - 1)
}
}
return sum
}
const pos = (buf) => {
var len = buf.length
var sum = 0
for (var i = len - 1; i > -1; i--) {
var byte = buf[i]
if (byte !== 0) {
sum += byte * Math.pow(256, len - i - 1)
}
}
return sum
}
const onesComp = byte => (0xff ^ byte) & 0xff
const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
module.exports = {
encode,
parse,
}
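
A round-trip sketch over a 12-byte field, the width tar uses for sizes (the require path is hypothetical):

const { encode, parse } = require('./large-numbers.js')

const buf = Buffer.alloc(12)
encode(8 * 1024 ** 3, buf) // 8 GiB: too large for an 11-digit octal field
console.log(buf[0].toString(16), parse(buf)) // '80' 8589934592

encode(-1, buf)
console.log(buf[0].toString(16), parse(buf)) // 'ff' -1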

backend/apis/nodejs/node_modules/tar/lib/list.js generated vendored Normal file

@@ -0,0 +1,139 @@
'use strict'
// XXX: This shares a lot in common with extract.js
// maybe some DRY opportunity here?
// tar -t
const hlo = require('./high-level-opt.js')
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const stripSlash = require('./strip-trailing-slashes.js')
module.exports = (opt_, files, cb) => {
if (typeof opt_ === 'function') {
cb = opt_, files = null, opt_ = {}
} else if (Array.isArray(opt_)) {
files = opt_, opt_ = {}
}
if (typeof files === 'function') {
cb = files, files = null
}
if (!files) {
files = []
} else {
files = Array.from(files)
}
const opt = hlo(opt_)
if (opt.sync && typeof cb === 'function') {
throw new TypeError('callback not supported for sync tar functions')
}
if (!opt.file && typeof cb === 'function') {
throw new TypeError('callback only supported with file option')
}
if (files.length) {
filesFilter(opt, files)
}
if (!opt.noResume) {
onentryFunction(opt)
}
return opt.file && opt.sync ? listFileSync(opt)
: opt.file ? listFile(opt, cb)
: list(opt)
}
const onentryFunction = opt => {
const onentry = opt.onentry
opt.onentry = onentry ? e => {
onentry(e)
e.resume()
} : e => e.resume()
}
// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
const map = new Map(files.map(f => [stripSlash(f), true]))
const filter = opt.filter
const mapHas = (file, r) => {
const root = r || path.parse(file).root || '.'
const ret = file === root ? false
: map.has(file) ? map.get(file)
: mapHas(path.dirname(file), root)
map.set(file, ret)
return ret
}
opt.filter = filter
? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
: file => mapHas(stripSlash(file))
}
const listFileSync = opt => {
const p = list(opt)
const file = opt.file
let threw = true
let fd
try {
const stat = fs.statSync(file)
const readSize = opt.maxReadSize || 16 * 1024 * 1024
if (stat.size < readSize) {
p.end(fs.readFileSync(file))
} else {
let pos = 0
const buf = Buffer.allocUnsafe(readSize)
fd = fs.openSync(file, 'r')
while (pos < stat.size) {
const bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
pos += bytesRead
p.write(buf.slice(0, bytesRead))
}
p.end()
}
threw = false
} finally {
if (threw && fd) {
try {
fs.closeSync(fd)
} catch (er) {}
}
}
}
const listFile = (opt, cb) => {
const parse = new Parser(opt)
const readSize = opt.maxReadSize || 16 * 1024 * 1024
const file = opt.file
const p = new Promise((resolve, reject) => {
parse.on('error', reject)
parse.on('end', resolve)
fs.stat(file, (er, stat) => {
if (er) {
reject(er)
} else {
const stream = new fsm.ReadStream(file, {
readSize: readSize,
size: stat.size,
})
stream.on('error', reject)
stream.pipe(parse)
}
})
})
return cb ? p.then(cb, cb) : p
}
const list = opt => new Parser(opt)
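
A listing sketch, assuming the package's index exposes this module as `tar.list` (alias `tar.t`); the onentry wrapper above resumes each entry so its body is drained automatically:

const tar = require('tar')

tar.list({ file: 'archive.tgz', onentry: entry => console.log(entry.path, entry.size) })
  .then(() => console.log('listing complete'))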

backend/apis/nodejs/node_modules/tar/lib/mkdir.js generated vendored Normal file

@@ -0,0 +1,229 @@
'use strict'
// wrapper around mkdirp for tar's needs.
// TODO: This should probably be a class, not functionally
// passing around state in a gazillion args.
const mkdirp = require('mkdirp')
const fs = require('fs')
const path = require('path')
const chownr = require('chownr')
const normPath = require('./normalize-windows-path.js')
class SymlinkError extends Error {
constructor (symlink, path) {
super('Cannot extract through symbolic link')
this.path = path
this.symlink = symlink
}
get name () {
return 'SymlinkError'
}
}
class CwdError extends Error {
constructor (path, code) {
super(code + ': Cannot cd into \'' + path + '\'')
this.path = path
this.code = code
}
get name () {
return 'CwdError'
}
}
const cGet = (cache, key) => cache.get(normPath(key))
const cSet = (cache, key, val) => cache.set(normPath(key), val)
const checkCwd = (dir, cb) => {
fs.stat(dir, (er, st) => {
if (er || !st.isDirectory()) {
er = new CwdError(dir, er && er.code || 'ENOTDIR')
}
cb(er)
})
}
module.exports = (dir, opt, cb) => {
dir = normPath(dir)
// if there's any overlap between mask and mode,
// then we'll need an explicit chmod
const umask = opt.umask
const mode = opt.mode | 0o0700
const needChmod = (mode & umask) !== 0
const uid = opt.uid
const gid = opt.gid
const doChown = typeof uid === 'number' &&
typeof gid === 'number' &&
(uid !== opt.processUid || gid !== opt.processGid)
const preserve = opt.preserve
const unlink = opt.unlink
const cache = opt.cache
const cwd = normPath(opt.cwd)
const done = (er, created) => {
if (er) {
cb(er)
} else {
cSet(cache, dir, true)
if (created && doChown) {
chownr(created, uid, gid, er => done(er))
} else if (needChmod) {
fs.chmod(dir, mode, cb)
} else {
cb()
}
}
}
if (cache && cGet(cache, dir) === true) {
return done()
}
if (dir === cwd) {
return checkCwd(dir, done)
}
if (preserve) {
return mkdirp(dir, { mode }).then(made => done(null, made), done)
}
const sub = normPath(path.relative(cwd, dir))
const parts = sub.split('/')
mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
}
const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
if (!parts.length) {
return cb(null, created)
}
const p = parts.shift()
const part = normPath(path.resolve(base + '/' + p))
if (cGet(cache, part)) {
return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
}
fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
}
const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
if (er) {
fs.lstat(part, (statEr, st) => {
if (statEr) {
statEr.path = statEr.path && normPath(statEr.path)
cb(statEr)
} else if (st.isDirectory()) {
mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
} else if (unlink) {
fs.unlink(part, er => {
if (er) {
return cb(er)
}
fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
})
} else if (st.isSymbolicLink()) {
return cb(new SymlinkError(part, part + '/' + parts.join('/')))
} else {
cb(er)
}
})
} else {
created = created || part
mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
}
}
const checkCwdSync = dir => {
let ok = false
let code = 'ENOTDIR'
try {
ok = fs.statSync(dir).isDirectory()
} catch (er) {
code = er.code
} finally {
if (!ok) {
throw new CwdError(dir, code)
}
}
}
module.exports.sync = (dir, opt) => {
dir = normPath(dir)
// if there's any overlap between mask and mode,
// then we'll need an explicit chmod
const umask = opt.umask
const mode = opt.mode | 0o0700
const needChmod = (mode & umask) !== 0
const uid = opt.uid
const gid = opt.gid
const doChown = typeof uid === 'number' &&
typeof gid === 'number' &&
(uid !== opt.processUid || gid !== opt.processGid)
const preserve = opt.preserve
const unlink = opt.unlink
const cache = opt.cache
const cwd = normPath(opt.cwd)
const done = (created) => {
cSet(cache, dir, true)
if (created && doChown) {
chownr.sync(created, uid, gid)
}
if (needChmod) {
fs.chmodSync(dir, mode)
}
}
if (cache && cGet(cache, dir) === true) {
return done()
}
if (dir === cwd) {
checkCwdSync(cwd)
return done()
}
if (preserve) {
return done(mkdirp.sync(dir, mode))
}
const sub = normPath(path.relative(cwd, dir))
const parts = sub.split('/')
let created = null
for (let p = parts.shift(), part = cwd;
p && (part += '/' + p);
p = parts.shift()) {
part = normPath(path.resolve(part))
if (cGet(cache, part)) {
continue
}
try {
fs.mkdirSync(part, mode)
created = created || part
cSet(cache, part, true)
} catch (er) {
const st = fs.lstatSync(part)
if (st.isDirectory()) {
cSet(cache, part, true)
continue
} else if (unlink) {
fs.unlinkSync(part)
fs.mkdirSync(part, mode)
created = created || part
cSet(cache, part, true)
continue
} else if (st.isSymbolicLink()) {
return new SymlinkError(part, part + '/' + parts.join('/'))
}
}
}
return done(created)
}

backend/apis/nodejs/node_modules/tar/lib/mode-fix.js generated vendored Normal file

@@ -0,0 +1,27 @@
'use strict'
module.exports = (mode, isDir, portable) => {
mode &= 0o7777
// in portable mode, use the minimum reasonable umask
// if this system creates files with 0o664 by default
// (as some linux distros do), then we'll write the
// archive with 0o644 instead. Also, don't ever create
// a file that is not readable/writable by the owner.
if (portable) {
mode = (mode | 0o600) & ~0o22
}
// if dirs are readable, then they should be listable
if (isDir) {
if (mode & 0o400) {
mode |= 0o100
}
if (mode & 0o40) {
mode |= 0o10
}
if (mode & 0o4) {
mode |= 0o1
}
}
return mode
}
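
Worked examples of the exported fixer (hypothetical local require):

const modeFix = require('./mode-fix.js')

// directories: each readable bit implies the matching execute/list bit
console.log(modeFix(0o644, true).toString(8)) // '755'

// portable: group/other write bits masked off, owner read/write forced on
console.log(modeFix(0o664, false, true).toString(8)) // '644'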

backend/apis/nodejs/node_modules/tar/lib/normalize-unicode.js generated vendored Normal file

@@ -0,0 +1,12 @@
// warning: extremely hot code path.
// This has been meticulously optimized for use
// within npm install on large package trees.
// Do not edit without careful benchmarking.
const normalizeCache = Object.create(null)
const { hasOwnProperty } = Object.prototype
module.exports = s => {
if (!hasOwnProperty.call(normalizeCache, s)) {
normalizeCache[s] = s.normalize('NFD')
}
return normalizeCache[s]
}
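
The practical effect: precomposed (NFC) and decomposed (NFD) spellings of the same string compare equal after normalization, and repeated lookups hit the cache (hypothetical local require):

const normalize = require('./normalize-unicode.js')

const nfc = '\u00e9'  // 'é' as a single precomposed code point
const nfd = 'e\u0301' // 'e' followed by a combining acute accent
console.log(normalize(nfc) === normalize(nfd)) // true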

backend/apis/nodejs/node_modules/tar/lib/normalize-windows-path.js generated vendored Normal file

@@ -0,0 +1,8 @@
// on windows, either \ or / are valid directory separators.
// on unix, \ is a valid character in filenames.
// so, on windows, and only on windows, we replace all \ chars with /,
// so that we can use / as our one and only directory separator char.
const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
module.exports = platform !== 'win32' ? p => p
: p => p && p.replace(/\\/g, '/')

backend/apis/nodejs/node_modules/tar/lib/pack.js generated vendored Normal file

@@ -0,0 +1,432 @@
'use strict'
// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
// The `add()` method is like `write()` but returns this,
// and `end()` returns `this` as well, so you can
// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
// You could also do something like:
// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
class PackJob {
constructor (path, absolute) {
this.path = path || './'
this.absolute = absolute
this.entry = null
this.stat = null
this.readdir = null
this.pending = false
this.ignore = false
this.piped = false
}
}
const { Minipass } = require('minipass')
const zlib = require('minizlib')
const ReadEntry = require('./read-entry.js')
const WriteEntry = require('./write-entry.js')
const WriteEntrySync = WriteEntry.Sync
const WriteEntryTar = WriteEntry.Tar
const Yallist = require('yallist')
const EOF = Buffer.alloc(1024)
const ONSTAT = Symbol('onStat')
const ENDED = Symbol('ended')
const QUEUE = Symbol('queue')
const CURRENT = Symbol('current')
const PROCESS = Symbol('process')
const PROCESSING = Symbol('processing')
const PROCESSJOB = Symbol('processJob')
const JOBS = Symbol('jobs')
const JOBDONE = Symbol('jobDone')
const ADDFSENTRY = Symbol('addFSEntry')
const ADDTARENTRY = Symbol('addTarEntry')
const STAT = Symbol('stat')
const READDIR = Symbol('readdir')
const ONREADDIR = Symbol('onreaddir')
const PIPE = Symbol('pipe')
const ENTRY = Symbol('entry')
const ENTRYOPT = Symbol('entryOpt')
const WRITEENTRYCLASS = Symbol('writeEntryClass')
const WRITE = Symbol('write')
const ONDRAIN = Symbol('ondrain')
const fs = require('fs')
const path = require('path')
const warner = require('./warn-mixin.js')
const normPath = require('./normalize-windows-path.js')
const Pack = warner(class Pack extends Minipass {
constructor (opt) {
super(opt)
opt = opt || Object.create(null)
this.opt = opt
this.file = opt.file || ''
this.cwd = opt.cwd || process.cwd()
this.maxReadSize = opt.maxReadSize
this.preservePaths = !!opt.preservePaths
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.prefix = normPath(opt.prefix || '')
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.readdirCache = opt.readdirCache || new Map()
this[WRITEENTRYCLASS] = WriteEntry
if (typeof opt.onwarn === 'function') {
this.on('warn', opt.onwarn)
}
this.portable = !!opt.portable
this.zip = null
if (opt.gzip || opt.brotli) {
if (opt.gzip && opt.brotli) {
throw new TypeError('gzip and brotli are mutually exclusive')
}
if (opt.gzip) {
if (typeof opt.gzip !== 'object') {
opt.gzip = {}
}
if (this.portable) {
opt.gzip.portable = true
}
this.zip = new zlib.Gzip(opt.gzip)
}
if (opt.brotli) {
if (typeof opt.brotli !== 'object') {
opt.brotli = {}
}
this.zip = new zlib.BrotliCompress(opt.brotli)
}
this.zip.on('data', chunk => super.write(chunk))
this.zip.on('end', _ => super.end())
this.zip.on('drain', _ => this[ONDRAIN]())
this.on('resume', _ => this.zip.resume())
} else {
this.on('drain', this[ONDRAIN])
}
this.noDirRecurse = !!opt.noDirRecurse
this.follow = !!opt.follow
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
this[QUEUE] = new Yallist()
this[JOBS] = 0
this.jobs = +opt.jobs || 4
this[PROCESSING] = false
this[ENDED] = false
}
[WRITE] (chunk) {
return super.write(chunk)
}
add (path) {
this.write(path)
return this
}
end (path) {
if (path) {
this.write(path)
}
this[ENDED] = true
this[PROCESS]()
return this
}
write (path) {
if (this[ENDED]) {
throw new Error('write after end')
}
if (path instanceof ReadEntry) {
this[ADDTARENTRY](path)
} else {
this[ADDFSENTRY](path)
}
return this.flowing
}
[ADDTARENTRY] (p) {
const absolute = normPath(path.resolve(this.cwd, p.path))
// in this case, we don't have to wait for the stat
if (!this.filter(p.path, p)) {
p.resume()
} else {
const job = new PackJob(p.path, absolute, false)
job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
job.entry.on('end', _ => this[JOBDONE](job))
this[JOBS] += 1
this[QUEUE].push(job)
}
this[PROCESS]()
}
[ADDFSENTRY] (p) {
const absolute = normPath(path.resolve(this.cwd, p))
this[QUEUE].push(new PackJob(p, absolute))
this[PROCESS]()
}
[STAT] (job) {
job.pending = true
this[JOBS] += 1
const stat = this.follow ? 'stat' : 'lstat'
fs[stat](job.absolute, (er, stat) => {
job.pending = false
this[JOBS] -= 1
if (er) {
this.emit('error', er)
} else {
this[ONSTAT](job, stat)
}
})
}
[ONSTAT] (job, stat) {
this.statCache.set(job.absolute, stat)
job.stat = stat
// now we have the stat, we can filter it.
if (!this.filter(job.path, stat)) {
job.ignore = true
}
this[PROCESS]()
}
[READDIR] (job) {
job.pending = true
this[JOBS] += 1
fs.readdir(job.absolute, (er, entries) => {
job.pending = false
this[JOBS] -= 1
if (er) {
return this.emit('error', er)
}
this[ONREADDIR](job, entries)
})
}
[ONREADDIR] (job, entries) {
this.readdirCache.set(job.absolute, entries)
job.readdir = entries
this[PROCESS]()
}
[PROCESS] () {
if (this[PROCESSING]) {
return
}
this[PROCESSING] = true
for (let w = this[QUEUE].head;
w !== null && this[JOBS] < this.jobs;
w = w.next) {
this[PROCESSJOB](w.value)
if (w.value.ignore) {
const p = w.next
this[QUEUE].removeNode(w)
w.next = p
}
}
this[PROCESSING] = false
if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
if (this.zip) {
this.zip.end(EOF)
} else {
super.write(EOF)
super.end()
}
}
}
get [CURRENT] () {
return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
}
[JOBDONE] (job) {
this[QUEUE].shift()
this[JOBS] -= 1
this[PROCESS]()
}
[PROCESSJOB] (job) {
if (job.pending) {
return
}
if (job.entry) {
if (job === this[CURRENT] && !job.piped) {
this[PIPE](job)
}
return
}
if (!job.stat) {
if (this.statCache.has(job.absolute)) {
this[ONSTAT](job, this.statCache.get(job.absolute))
} else {
this[STAT](job)
}
}
if (!job.stat) {
return
}
// filtered out!
if (job.ignore) {
return
}
if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
if (this.readdirCache.has(job.absolute)) {
this[ONREADDIR](job, this.readdirCache.get(job.absolute))
} else {
this[READDIR](job)
}
if (!job.readdir) {
return
}
}
// we know it doesn't have an entry, because that got checked above
job.entry = this[ENTRY](job)
if (!job.entry) {
job.ignore = true
return
}
if (job === this[CURRENT] && !job.piped) {
this[PIPE](job)
}
}
[ENTRYOPT] (job) {
return {
onwarn: (code, msg, data) => this.warn(code, msg, data),
noPax: this.noPax,
cwd: this.cwd,
absolute: job.absolute,
preservePaths: this.preservePaths,
maxReadSize: this.maxReadSize,
strict: this.strict,
portable: this.portable,
linkCache: this.linkCache,
statCache: this.statCache,
noMtime: this.noMtime,
mtime: this.mtime,
prefix: this.prefix,
}
}
[ENTRY] (job) {
this[JOBS] += 1
try {
return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
.on('end', () => this[JOBDONE](job))
.on('error', er => this.emit('error', er))
} catch (er) {
this.emit('error', er)
}
}
[ONDRAIN] () {
if (this[CURRENT] && this[CURRENT].entry) {
this[CURRENT].entry.resume()
}
}
// like .pipe() but using super, because our write() is special
[PIPE] (job) {
job.piped = true
if (job.readdir) {
job.readdir.forEach(entry => {
const p = job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
}
const source = job.entry
const zip = this.zip
if (zip) {
source.on('data', chunk => {
if (!zip.write(chunk)) {
source.pause()
}
})
} else {
source.on('data', chunk => {
if (!super.write(chunk)) {
source.pause()
}
})
}
}
pause () {
if (this.zip) {
this.zip.pause()
}
return super.pause()
}
})
class PackSync extends Pack {
constructor (opt) {
super(opt)
this[WRITEENTRYCLASS] = WriteEntrySync
}
// pause/resume are no-ops in sync streams.
pause () {}
resume () {}
[STAT] (job) {
const stat = this.follow ? 'statSync' : 'lstatSync'
this[ONSTAT](job, fs[stat](job.absolute))
}
[READDIR] (job, stat) {
this[ONREADDIR](job, fs.readdirSync(job.absolute))
}
// gotta get it all in this tick
[PIPE] (job) {
const source = job.entry
const zip = this.zip
if (job.readdir) {
job.readdir.forEach(entry => {
const p = job.path
const base = p === './' ? '' : p.replace(/\/*$/, '/')
this[ADDFSENTRY](base + entry)
})
}
if (zip) {
source.on('data', chunk => {
zip.write(chunk)
})
} else {
source.on('data', chunk => {
super[WRITE](chunk)
})
}
}
}
Pack.Sync = PackSync
module.exports = Pack
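
A streaming sketch matching the header comment, with hypothetical output names:

const fs = require('fs')
const Pack = require('./pack.js')

new Pack({ gzip: true })
  .add('lib')  // add() is a write() that returns `this`
  .end()       // drains the job queue, then emits the two zeroed 512-byte EOF blocks
  .pipe(fs.createWriteStream('lib.tgz'))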

backend/apis/nodejs/node_modules/tar/lib/parse.js generated vendored Normal file

@@ -0,0 +1,552 @@
'use strict'
// this[BUFFER] is the remainder of a chunk if we're waiting for
// the full 512 bytes of a header to come in. We will Buffer.concat()
// it to the next write(), which is a mem copy, but a small one.
//
// this[QUEUE] is a Yallist of entries that haven't been emitted
// yet. This can only get filled up if the user keeps write()ing after
// a write() returns false, or does a write() with more than one entry
//
// We don't buffer chunks, we always parse them and either create an
// entry, or push it into the active entry. The ReadEntry class knows
// to throw data away if .ignore=true
//
// Shift entry off the buffer when it emits 'end', and emit 'entry' for
// the next one in the list.
//
// At any time, we're pushing body chunks into the entry at WRITEENTRY,
// and waiting for 'end' on the entry at READENTRY
//
// ignored entries get .resume() called on them straight away
const warner = require('./warn-mixin.js')
const Header = require('./header.js')
const EE = require('events')
const Yallist = require('yallist')
const maxMetaEntrySize = 1024 * 1024
const Entry = require('./read-entry.js')
const Pax = require('./pax.js')
const zlib = require('minizlib')
const { nextTick } = require('process')
const gzipHeader = Buffer.from([0x1f, 0x8b])
const STATE = Symbol('state')
const WRITEENTRY = Symbol('writeEntry')
const READENTRY = Symbol('readEntry')
const NEXTENTRY = Symbol('nextEntry')
const PROCESSENTRY = Symbol('processEntry')
const EX = Symbol('extendedHeader')
const GEX = Symbol('globalExtendedHeader')
const META = Symbol('meta')
const EMITMETA = Symbol('emitMeta')
const BUFFER = Symbol('buffer')
const QUEUE = Symbol('queue')
const ENDED = Symbol('ended')
const EMITTEDEND = Symbol('emittedEnd')
const EMIT = Symbol('emit')
const UNZIP = Symbol('unzip')
const CONSUMECHUNK = Symbol('consumeChunk')
const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
const CONSUMEBODY = Symbol('consumeBody')
const CONSUMEMETA = Symbol('consumeMeta')
const CONSUMEHEADER = Symbol('consumeHeader')
const CONSUMING = Symbol('consuming')
const BUFFERCONCAT = Symbol('bufferConcat')
const MAYBEEND = Symbol('maybeEnd')
const WRITING = Symbol('writing')
const ABORTED = Symbol('aborted')
const DONE = Symbol('onDone')
const SAW_VALID_ENTRY = Symbol('sawValidEntry')
const SAW_NULL_BLOCK = Symbol('sawNullBlock')
const SAW_EOF = Symbol('sawEOF')
const CLOSESTREAM = Symbol('closeStream')
const noop = _ => true
module.exports = warner(class Parser extends EE {
constructor (opt) {
opt = opt || {}
super(opt)
this.file = opt.file || ''
// set to boolean false when an entry starts. 1024 bytes of \0
// is technically a valid tarball, albeit a boring one.
this[SAW_VALID_ENTRY] = null
// these BADARCHIVE errors can't be detected early. listen on DONE.
this.on(DONE, _ => {
if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
// either less than 1 block of data, or all entries were invalid.
// Either way, probably not even a tarball.
this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
}
})
if (opt.ondone) {
this.on(DONE, opt.ondone)
} else {
this.on(DONE, _ => {
this.emit('prefinish')
this.emit('finish')
this.emit('end')
})
}
this.strict = !!opt.strict
this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
this.filter = typeof opt.filter === 'function' ? opt.filter : noop
// Unlike gzip, brotli doesn't have any magic bytes to identify it
// Users need to explicitly tell us they're extracting a brotli file
// Or we infer from the file extension
const isTBR = (opt.file && (
opt.file.endsWith('.tar.br') || opt.file.endsWith('.tbr')))
// if it's a tbr file it MIGHT be brotli, but we don't know until
// we look at it and verify it's not a valid tar file.
this.brotli = !opt.gzip && opt.brotli !== undefined ? opt.brotli
: isTBR ? undefined
: false
// have to set this so that streams are ok piping into it
this.writable = true
this.readable = false
this[QUEUE] = new Yallist()
this[BUFFER] = null
this[READENTRY] = null
this[WRITEENTRY] = null
this[STATE] = 'begin'
this[META] = ''
this[EX] = null
this[GEX] = null
this[ENDED] = false
this[UNZIP] = null
this[ABORTED] = false
this[SAW_NULL_BLOCK] = false
this[SAW_EOF] = false
this.on('end', () => this[CLOSESTREAM]())
if (typeof opt.onwarn === 'function') {
this.on('warn', opt.onwarn)
}
if (typeof opt.onentry === 'function') {
this.on('entry', opt.onentry)
}
}
[CONSUMEHEADER] (chunk, position) {
if (this[SAW_VALID_ENTRY] === null) {
this[SAW_VALID_ENTRY] = false
}
let header
try {
header = new Header(chunk, position, this[EX], this[GEX])
} catch (er) {
return this.warn('TAR_ENTRY_INVALID', er)
}
if (header.nullBlock) {
if (this[SAW_NULL_BLOCK]) {
this[SAW_EOF] = true
// ending an archive with no entries. pointless, but legal.
if (this[STATE] === 'begin') {
this[STATE] = 'header'
}
this[EMIT]('eof')
} else {
this[SAW_NULL_BLOCK] = true
this[EMIT]('nullBlock')
}
} else {
this[SAW_NULL_BLOCK] = false
if (!header.cksumValid) {
this.warn('TAR_ENTRY_INVALID', 'checksum failure', { header })
} else if (!header.path) {
this.warn('TAR_ENTRY_INVALID', 'path is required', { header })
} else {
const type = header.type
if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) {
this.warn('TAR_ENTRY_INVALID', 'linkpath required', { header })
} else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath) {
this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', { header })
} else {
const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
// we do this for meta & ignored entries as well, because they
// are still valid tar, or else we wouldn't know to ignore them
if (!this[SAW_VALID_ENTRY]) {
if (entry.remain) {
// this might be the one!
const onend = () => {
if (!entry.invalid) {
this[SAW_VALID_ENTRY] = true
}
}
entry.on('end', onend)
} else {
this[SAW_VALID_ENTRY] = true
}
}
if (entry.meta) {
if (entry.size > this.maxMetaEntrySize) {
entry.ignore = true
this[EMIT]('ignoredEntry', entry)
this[STATE] = 'ignore'
entry.resume()
} else if (entry.size > 0) {
this[META] = ''
entry.on('data', c => this[META] += c)
this[STATE] = 'meta'
}
} else {
this[EX] = null
entry.ignore = entry.ignore || !this.filter(entry.path, entry)
if (entry.ignore) {
// probably valid, just not something we care about
this[EMIT]('ignoredEntry', entry)
this[STATE] = entry.remain ? 'ignore' : 'header'
entry.resume()
} else {
if (entry.remain) {
this[STATE] = 'body'
} else {
this[STATE] = 'header'
entry.end()
}
if (!this[READENTRY]) {
this[QUEUE].push(entry)
this[NEXTENTRY]()
} else {
this[QUEUE].push(entry)
}
}
}
}
}
}
}
[CLOSESTREAM] () {
nextTick(() => this.emit('close'))
}
[PROCESSENTRY] (entry) {
let go = true
if (!entry) {
this[READENTRY] = null
go = false
} else if (Array.isArray(entry)) {
this.emit.apply(this, entry)
} else {
this[READENTRY] = entry
this.emit('entry', entry)
if (!entry.emittedEnd) {
entry.on('end', _ => this[NEXTENTRY]())
go = false
}
}
return go
}
[NEXTENTRY] () {
do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
if (!this[QUEUE].length) {
// At this point, there's nothing in the queue, but we may have an
// entry which is being consumed (readEntry).
// If we don't, then we definitely can handle more data.
// If we do, and either it's flowing, or it has never had any data
// written to it, then it needs more.
// The only other possibility is that it has returned false from a
// write() call, so we wait for the next drain to continue.
const re = this[READENTRY]
const drainNow = !re || re.flowing || re.size === re.remain
if (drainNow) {
if (!this[WRITING]) {
this.emit('drain')
}
} else {
re.once('drain', _ => this.emit('drain'))
}
}
}
[CONSUMEBODY] (chunk, position) {
// write up to but no more than writeEntry.blockRemain
const entry = this[WRITEENTRY]
const br = entry.blockRemain
const c = (br >= chunk.length && position === 0) ? chunk
: chunk.slice(position, position + br)
entry.write(c)
if (!entry.blockRemain) {
this[STATE] = 'header'
this[WRITEENTRY] = null
entry.end()
}
return c.length
}
[CONSUMEMETA] (chunk, position) {
const entry = this[WRITEENTRY]
const ret = this[CONSUMEBODY](chunk, position)
// if we finished, then the entry is reset
if (!this[WRITEENTRY]) {
this[EMITMETA](entry)
}
return ret
}
[EMIT] (ev, data, extra) {
if (!this[QUEUE].length && !this[READENTRY]) {
this.emit(ev, data, extra)
} else {
this[QUEUE].push([ev, data, extra])
}
}
[EMITMETA] (entry) {
this[EMIT]('meta', this[META])
switch (entry.type) {
case 'ExtendedHeader':
case 'OldExtendedHeader':
this[EX] = Pax.parse(this[META], this[EX], false)
break
case 'GlobalExtendedHeader':
this[GEX] = Pax.parse(this[META], this[GEX], true)
break
case 'NextFileHasLongPath':
case 'OldGnuLongPath':
this[EX] = this[EX] || Object.create(null)
this[EX].path = this[META].replace(/\0.*/, '')
break
case 'NextFileHasLongLinkpath':
this[EX] = this[EX] || Object.create(null)
this[EX].linkpath = this[META].replace(/\0.*/, '')
break
/* istanbul ignore next */
default: throw new Error('unknown meta: ' + entry.type)
}
}
abort (error) {
this[ABORTED] = true
this.emit('abort', error)
// always throws, even in non-strict mode
this.warn('TAR_ABORT', error, { recoverable: false })
}
write (chunk) {
if (this[ABORTED]) {
return
}
// first write, might be gzipped
const needSniff = this[UNZIP] === null ||
this.brotli === undefined && this[UNZIP] === false
if (needSniff && chunk) {
if (this[BUFFER]) {
chunk = Buffer.concat([this[BUFFER], chunk])
this[BUFFER] = null
}
if (chunk.length < gzipHeader.length) {
this[BUFFER] = chunk
return true
}
// look for gzip header
for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
if (chunk[i] !== gzipHeader[i]) {
this[UNZIP] = false
}
}
const maybeBrotli = this.brotli === undefined
if (this[UNZIP] === false && maybeBrotli) {
// read the first header to see if it's a valid tar file. If so,
// we can safely assume that it's not actually brotli, despite the
// .tbr or .tar.br file extension.
// if we ended before getting a full chunk, yes, def brotli
if (chunk.length < 512) {
if (this[ENDED]) {
this.brotli = true
} else {
this[BUFFER] = chunk
return true
}
} else {
// if it's tar, it's pretty reliably not brotli, chances of
// that happening are astronomical.
try {
new Header(chunk.slice(0, 512))
this.brotli = false
} catch (_) {
this.brotli = true
}
}
}
if (this[UNZIP] === null || (this[UNZIP] === false && this.brotli)) {
const ended = this[ENDED]
this[ENDED] = false
this[UNZIP] = this[UNZIP] === null
? new zlib.Unzip()
: new zlib.BrotliDecompress()
this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
this[UNZIP].on('error', er => this.abort(er))
this[UNZIP].on('end', _ => {
this[ENDED] = true
this[CONSUMECHUNK]()
})
this[WRITING] = true
const ret = this[UNZIP][ended ? 'end' : 'write'](chunk)
this[WRITING] = false
return ret
}
}
this[WRITING] = true
if (this[UNZIP]) {
this[UNZIP].write(chunk)
} else {
this[CONSUMECHUNK](chunk)
}
this[WRITING] = false
// return false if there's a queue, or if the current entry isn't flowing
const ret =
this[QUEUE].length ? false :
this[READENTRY] ? this[READENTRY].flowing :
true
// if we have no queue, then that means a clogged READENTRY
if (!ret && !this[QUEUE].length) {
this[READENTRY].once('drain', _ => this.emit('drain'))
}
return ret
}
[BUFFERCONCAT] (c) {
if (c && !this[ABORTED]) {
this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
}
}
[MAYBEEND] () {
if (this[ENDED] &&
!this[EMITTEDEND] &&
!this[ABORTED] &&
!this[CONSUMING]) {
this[EMITTEDEND] = true
const entry = this[WRITEENTRY]
if (entry && entry.blockRemain) {
// truncated, likely a damaged file
const have = this[BUFFER] ? this[BUFFER].length : 0
this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
entry.blockRemain} more bytes, only ${have} available)`, { entry })
if (this[BUFFER]) {
entry.write(this[BUFFER])
}
entry.end()
}
this[EMIT](DONE)
}
}
[CONSUMECHUNK] (chunk) {
if (this[CONSUMING]) {
this[BUFFERCONCAT](chunk)
} else if (!chunk && !this[BUFFER]) {
this[MAYBEEND]()
} else {
this[CONSUMING] = true
if (this[BUFFER]) {
this[BUFFERCONCAT](chunk)
const c = this[BUFFER]
this[BUFFER] = null
this[CONSUMECHUNKSUB](c)
} else {
this[CONSUMECHUNKSUB](chunk)
}
while (this[BUFFER] &&
this[BUFFER].length >= 512 &&
!this[ABORTED] &&
!this[SAW_EOF]) {
const c = this[BUFFER]
this[BUFFER] = null
this[CONSUMECHUNKSUB](c)
}
this[CONSUMING] = false
}
if (!this[BUFFER] || this[ENDED]) {
this[MAYBEEND]()
}
}
[CONSUMECHUNKSUB] (chunk) {
// we know that we are in CONSUMING mode, so anything written goes into
// the buffer. Advance the position and put any remainder in the buffer.
let position = 0
const length = chunk.length
while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
switch (this[STATE]) {
case 'begin':
case 'header':
this[CONSUMEHEADER](chunk, position)
position += 512
break
case 'ignore':
case 'body':
position += this[CONSUMEBODY](chunk, position)
break
case 'meta':
position += this[CONSUMEMETA](chunk, position)
break
/* istanbul ignore next */
default:
throw new Error('invalid state: ' + this[STATE])
}
}
if (position < length) {
if (this[BUFFER]) {
this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
} else {
this[BUFFER] = chunk.slice(position)
}
}
}
end (chunk) {
if (!this[ABORTED]) {
if (this[UNZIP]) {
this[UNZIP].end(chunk)
} else {
this[ENDED] = true
if (this.brotli === undefined) chunk = chunk || Buffer.alloc(0)
this.write(chunk)
}
}
}
})
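
A direct-use sketch of the parser: gzip input is sniffed from the two magic bytes, and each entry's body must be consumed (or resumed) before the next entry is emitted (hypothetical local require and archive name):

const fs = require('fs')
const Parser = require('./parse.js')

const p = new Parser({
  onentry: entry => {
    console.log(entry.path, entry.size)
    entry.resume() // discard the body so parsing can proceed
  },
})
fs.createReadStream('archive.tgz').pipe(p)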

backend/apis/nodejs/node_modules/tar/lib/path-reservations.js generated vendored Normal file

@@ -0,0 +1,156 @@
// A path exclusive reservation system
// reserve([list, of, paths], fn)
// When the fn is first in line for all its paths, it
// is called with a cb that clears the reservation.
//
// Used by async unpack to avoid clobbering paths in use,
// while still allowing maximal safe parallelization.
const assert = require('assert')
const normalize = require('./normalize-unicode.js')
const stripSlashes = require('./strip-trailing-slashes.js')
const { join } = require('path')
const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
const isWindows = platform === 'win32'
module.exports = () => {
// path => [function or Set]
// A Set object means a directory reservation
// A fn is a direct reservation on that path
const queues = new Map()
// fn => {paths:[path,...], dirs:[path, ...]}
const reservations = new Map()
// return a set of parent dirs for a given path
// '/a/b/c/d' -> ['/', '/a', '/a/b', '/a/b/c', '/a/b/c/d']
const getDirs = path => {
const dirs = path.split('/').slice(0, -1).reduce((set, path) => {
if (set.length) {
path = join(set[set.length - 1], path)
}
set.push(path || '/')
return set
}, [])
return dirs
}
// functions currently running
const running = new Set()
// return the queues for each path the function cares about
// fn => {paths, dirs}
const getQueues = fn => {
const res = reservations.get(fn)
/* istanbul ignore if - unpossible */
if (!res) {
throw new Error('function does not have any path reservations')
}
return {
paths: res.paths.map(path => queues.get(path)),
dirs: [...res.dirs].map(path => queues.get(path)),
}
}
// check if fn is first in line for all its paths, and is
// included in the first set for all its dir queues
const check = fn => {
const { paths, dirs } = getQueues(fn)
return paths.every(q => q[0] === fn) &&
dirs.every(q => q[0] instanceof Set && q[0].has(fn))
}
// run the function if it's first in line and not already running
const run = fn => {
if (running.has(fn) || !check(fn)) {
return false
}
running.add(fn)
fn(() => clear(fn))
return true
}
const clear = fn => {
if (!running.has(fn)) {
return false
}
const { paths, dirs } = reservations.get(fn)
const next = new Set()
paths.forEach(path => {
const q = queues.get(path)
assert.equal(q[0], fn)
if (q.length === 1) {
queues.delete(path)
} else {
q.shift()
if (typeof q[0] === 'function') {
next.add(q[0])
} else {
q[0].forEach(fn => next.add(fn))
}
}
})
dirs.forEach(dir => {
const q = queues.get(dir)
assert(q[0] instanceof Set)
if (q[0].size === 1 && q.length === 1) {
queues.delete(dir)
} else if (q[0].size === 1) {
q.shift()
// must be a function or else the Set would've been reused
next.add(q[0])
} else {
q[0].delete(fn)
}
})
running.delete(fn)
next.forEach(fn => run(fn))
return true
}
const reserve = (paths, fn) => {
// collide on matches across case and unicode normalization
// On windows, thanks to the magic of 8.3 shortnames, it is fundamentally
// impossible to determine whether two paths refer to the same thing on
// disk, without asking the kernel for a shortname.
// So, we just pretend that every path matches every other path here,
// effectively removing all parallelization on windows.
paths = isWindows ? ['win32 parallelization disabled'] : paths.map(p => {
// don't need normPath, because we skip this entirely for windows
return stripSlashes(join(normalize(p))).toLowerCase()
})
const dirs = new Set(
paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
)
reservations.set(fn, { dirs, paths })
paths.forEach(path => {
const q = queues.get(path)
if (!q) {
queues.set(path, [fn])
} else {
q.push(fn)
}
})
dirs.forEach(dir => {
const q = queues.get(dir)
if (!q) {
queues.set(dir, [new Set([fn])])
} else if (q[q.length - 1] instanceof Set) {
q[q.length - 1].add(fn)
} else {
q.push(new Set([fn]))
}
})
return run(fn)
}
return { check, reserve }
}
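
A sketch of the reservation discipline: two callers reserving the same path run strictly one after the other, each releasing its claim through the callback (hypothetical local require and path):

const pathReservations = require('./path-reservations.js')

const r = pathReservations()
r.reserve(['out/a.txt'], done => {
  console.log('first writer holds out/a.txt')
  setImmediate(done) // release later; the second caller stays queued until then
})
r.reserve(['out/a.txt'], done => {
  console.log('second writer runs only after the first releases')
  done()
})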

backend/apis/nodejs/node_modules/tar/lib/pax.js generated vendored Normal file

@@ -0,0 +1,150 @@
'use strict'
const Header = require('./header.js')
const path = require('path')
class Pax {
constructor (obj, global) {
this.atime = obj.atime || null
this.charset = obj.charset || null
this.comment = obj.comment || null
this.ctime = obj.ctime || null
this.gid = obj.gid || null
this.gname = obj.gname || null
this.linkpath = obj.linkpath || null
this.mtime = obj.mtime || null
this.path = obj.path || null
this.size = obj.size || null
this.uid = obj.uid || null
this.uname = obj.uname || null
this.dev = obj.dev || null
this.ino = obj.ino || null
this.nlink = obj.nlink || null
this.global = global || false
}
encode () {
const body = this.encodeBody()
if (body === '') {
return null
}
const bodyLen = Buffer.byteLength(body)
// round up to 512 bytes
// add 512 for header
const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
const buf = Buffer.allocUnsafe(bufLen)
// 0-fill the header section, it might not hit every field
for (let i = 0; i < 512; i++) {
buf[i] = 0
}
new Header({
// XXX split the path
// then the path should be PaxHeader + basename, but less than 99,
// prepend with the dirname
path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
mode: this.mode || 0o644,
uid: this.uid || null,
gid: this.gid || null,
size: bodyLen,
mtime: this.mtime || null,
type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
linkpath: '',
uname: this.uname || '',
gname: this.gname || '',
devmaj: 0,
devmin: 0,
atime: this.atime || null,
ctime: this.ctime || null,
}).encode(buf)
buf.write(body, 512, bodyLen, 'utf8')
// null pad after the body
for (let i = bodyLen + 512; i < buf.length; i++) {
buf[i] = 0
}
return buf
}
encodeBody () {
return (
this.encodeField('path') +
this.encodeField('ctime') +
this.encodeField('atime') +
this.encodeField('dev') +
this.encodeField('ino') +
this.encodeField('nlink') +
this.encodeField('charset') +
this.encodeField('comment') +
this.encodeField('gid') +
this.encodeField('gname') +
this.encodeField('linkpath') +
this.encodeField('mtime') +
this.encodeField('size') +
this.encodeField('uid') +
this.encodeField('uname')
)
}
encodeField (field) {
if (this[field] === null || this[field] === undefined) {
return ''
}
const v = this[field] instanceof Date ? this[field].getTime() / 1000
: this[field]
const s = ' ' +
(field === 'dev' || field === 'ino' || field === 'nlink'
? 'SCHILY.' : '') +
field + '=' + v + '\n'
const byteLen = Buffer.byteLength(s)
// the length prefix counts its own digits as well: if the rest of the
// record is 9 characters, the 1-digit prefix makes it 10, which needs
// a 2-digit prefix, making the record 11 chars in total.
let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
if (byteLen + digits >= Math.pow(10, digits)) {
digits += 1
}
const len = digits + byteLen
return len + s
}
}
Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
const merge = (a, b) =>
b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
const parseKV = string =>
string
.replace(/\n$/, '')
.split('\n')
.reduce(parseKVLine, Object.create(null))
const parseKVLine = (set, line) => {
const n = parseInt(line, 10)
// XXX Values with \n in them will fail this.
// Refactor to not be a naive line-by-line parse.
if (n !== Buffer.byteLength(line) + 1) {
return set
}
line = line.slice((n + ' ').length)
const kv = line.split('=')
const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
if (!k) {
return set
}
const v = kv.join('=')
set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
? new Date(v * 1000)
: /^[0-9]+$/.test(v) ? +v
: v
return set
}
module.exports = Pax
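
A sketch of the record format encodeField produces: each record is '<len> <key>=<value>\n', where <len> counts the entire record including its own digits (hypothetical local require):

const Pax = require('./pax.js')

const pax = new Pax({ path: 'a/very/long/path.txt', mtime: new Date(1000000000 * 1000) })
console.log(JSON.stringify(pax.encodeBody()))
// "29 path=a/very/long/path.txt\n20 mtime=1000000000\n"

console.log(Pax.parse(pax.encodeBody(), null, false).path) // 'a/very/long/path.txt'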

backend/apis/nodejs/node_modules/tar/lib/read-entry.js generated vendored Normal file

@@ -0,0 +1,107 @@
'use strict'
const { Minipass } = require('minipass')
const normPath = require('./normalize-windows-path.js')
const SLURP = Symbol('slurp')
module.exports = class ReadEntry extends Minipass {
constructor (header, ex, gex) {
super()
// read entries always start life paused. this is to avoid the
// situation where Minipass's auto-ending empty streams results
// in an entry ending before we're ready for it.
this.pause()
this.extended = ex
this.globalExtended = gex
this.header = header
this.startBlockSize = 512 * Math.ceil(header.size / 512)
this.blockRemain = this.startBlockSize
this.remain = header.size
this.type = header.type
this.meta = false
this.ignore = false
switch (this.type) {
case 'File':
case 'OldFile':
case 'Link':
case 'SymbolicLink':
case 'CharacterDevice':
case 'BlockDevice':
case 'Directory':
case 'FIFO':
case 'ContiguousFile':
case 'GNUDumpDir':
break
case 'NextFileHasLongLinkpath':
case 'NextFileHasLongPath':
case 'OldGnuLongPath':
case 'GlobalExtendedHeader':
case 'ExtendedHeader':
case 'OldExtendedHeader':
this.meta = true
break
// NOTE: gnutar and bsdtar treat unrecognized types as 'File'
// it may be worth doing the same, but with a warning.
default:
this.ignore = true
}
this.path = normPath(header.path)
this.mode = header.mode
if (this.mode) {
this.mode = this.mode & 0o7777
}
this.uid = header.uid
this.gid = header.gid
this.uname = header.uname
this.gname = header.gname
this.size = header.size
this.mtime = header.mtime
this.atime = header.atime
this.ctime = header.ctime
this.linkpath = normPath(header.linkpath)
if (ex) {
this[SLURP](ex)
}
if (gex) {
this[SLURP](gex, true)
}
}
write (data) {
const writeLen = data.length
if (writeLen > this.blockRemain) {
throw new Error('writing more to entry than is appropriate')
}
const r = this.remain
const br = this.blockRemain
this.remain = Math.max(0, r - writeLen)
this.blockRemain = Math.max(0, br - writeLen)
if (this.ignore) {
return true
}
if (r >= writeLen) {
return super.write(data)
}
// r < writeLen
return super.write(data.slice(0, r))
}
[SLURP] (ex, global) {
for (const k in ex) {
// we slurp in everything except for the path attribute in
// a global extended header, because that's weird.
if (ex[k] !== null && ex[k] !== undefined &&
!(global && k === 'path')) {
this[k] = k === 'path' || k === 'linkpath' ? normPath(ex[k]) : ex[k]
}
}
}
}

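A sketch of the block arithmetic above (size illustrative, not part of the vendored file):

const size = 700                                    // header.size
const startBlockSize = 512 * Math.ceil(size / 512)  // 1024: body rounded up to 512-byte blocks
// write() receives the padded 1024 bytes but forwards only the first 700;
// the trailing 324 null pad bytes are dropped by the slice in write()
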
246
backend/apis/nodejs/node_modules/tar/lib/replace.js generated vendored Normal file
View File

@ -0,0 +1,246 @@
'use strict'
// tar -r
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')
// starting at the head of the file, read a Header
// If the checksum is invalid, that's our position to start writing
// If it is valid, jump forward by the specified size (round up to 512)
// and try again.
// Write the new Pack stream starting there.
const Header = require('./header.js')
module.exports = (opt_, files, cb) => {
const opt = hlo(opt_)
if (!opt.file) {
throw new TypeError('file is required')
}
if (opt.gzip || opt.brotli || opt.file.endsWith('.br') || opt.file.endsWith('.tbr')) {
throw new TypeError('cannot append to compressed archives')
}
if (!files || !Array.isArray(files) || !files.length) {
throw new TypeError('no files or directories specified')
}
files = Array.from(files)
return opt.sync ? replaceSync(opt, files)
: replace(opt, files, cb)
}
const replaceSync = (opt, files) => {
const p = new Pack.Sync(opt)
let threw = true
let fd
let position
try {
try {
fd = fs.openSync(opt.file, 'r+')
} catch (er) {
if (er.code === 'ENOENT') {
fd = fs.openSync(opt.file, 'w+')
} else {
throw er
}
}
const st = fs.fstatSync(fd)
const headBuf = Buffer.alloc(512)
POSITION: for (position = 0; position < st.size; position += 512) {
for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
bytes = fs.readSync(
fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
)
if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b) {
throw new Error('cannot append to compressed archives')
}
if (!bytes) {
break POSITION
}
}
const h = new Header(headBuf)
if (!h.cksumValid) {
break
}
const entryBlockSize = 512 * Math.ceil(h.size / 512)
if (position + entryBlockSize + 512 > st.size) {
break
}
// the 512 for the header we just parsed will be added as well
// also jump ahead all the blocks for the body
position += entryBlockSize
if (opt.mtimeCache) {
opt.mtimeCache.set(h.path, h.mtime)
}
}
threw = false
streamSync(opt, p, position, fd, files)
} finally {
if (threw) {
try {
fs.closeSync(fd)
} catch (er) {}
}
}
}
const streamSync = (opt, p, position, fd, files) => {
const stream = new fsm.WriteStreamSync(opt.file, {
fd: fd,
start: position,
})
p.pipe(stream)
addFilesSync(p, files)
}
const replace = (opt, files, cb) => {
files = Array.from(files)
const p = new Pack(opt)
const getPos = (fd, size, cb_) => {
const cb = (er, pos) => {
if (er) {
fs.close(fd, _ => cb_(er))
} else {
cb_(null, pos)
}
}
let position = 0
if (size === 0) {
return cb(null, 0)
}
let bufPos = 0
const headBuf = Buffer.alloc(512)
const onread = (er, bytes) => {
if (er) {
return cb(er)
}
bufPos += bytes
if (bufPos < 512 && bytes) {
return fs.read(
fd, headBuf, bufPos, headBuf.length - bufPos,
position + bufPos, onread
)
}
if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b) {
return cb(new Error('cannot append to compressed archives'))
}
// truncated header
if (bufPos < 512) {
return cb(null, position)
}
const h = new Header(headBuf)
if (!h.cksumValid) {
return cb(null, position)
}
const entryBlockSize = 512 * Math.ceil(h.size / 512)
if (position + entryBlockSize + 512 > size) {
return cb(null, position)
}
position += entryBlockSize + 512
if (position >= size) {
return cb(null, position)
}
if (opt.mtimeCache) {
opt.mtimeCache.set(h.path, h.mtime)
}
bufPos = 0
fs.read(fd, headBuf, 0, 512, position, onread)
}
fs.read(fd, headBuf, 0, 512, position, onread)
}
const promise = new Promise((resolve, reject) => {
p.on('error', reject)
let flag = 'r+'
const onopen = (er, fd) => {
if (er && er.code === 'ENOENT' && flag === 'r+') {
flag = 'w+'
return fs.open(opt.file, flag, onopen)
}
if (er) {
return reject(er)
}
fs.fstat(fd, (er, st) => {
if (er) {
return fs.close(fd, () => reject(er))
}
getPos(fd, st.size, (er, position) => {
if (er) {
return reject(er)
}
const stream = new fsm.WriteStream(opt.file, {
fd: fd,
start: position,
})
p.pipe(stream)
stream.on('error', reject)
stream.on('close', resolve)
addFilesAsync(p, files)
})
})
}
fs.open(opt.file, flag, onopen)
})
return cb ? promise.then(cb, cb) : promise
}
const addFilesSync = (p, files) => {
files.forEach(file => {
if (file.charAt(0) === '@') {
t({
file: path.resolve(p.cwd, file.slice(1)),
sync: true,
noResume: true,
onentry: entry => p.add(entry),
})
} else {
p.add(file)
}
})
p.end()
}
const addFilesAsync = (p, files) => {
while (files.length) {
const file = files.shift()
if (file.charAt(0) === '@') {
return t({
file: path.resolve(p.cwd, file.slice(1)),
noResume: true,
onentry: entry => p.add(entry),
}).then(_ => addFilesAsync(p, files))
} else {
p.add(file)
}
}
p.end()
}

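A sketch of the offset arithmetic used by the header scan above (sizes illustrative, not part of the vendored file):

const entryBlockSize = 512 * Math.ceil(700 / 512)  // 1024 body bytes for a 700-byte entry
const nextPosition = 512 + entryBlockSize          // 1536: header block + body blocks
// replaceSync/getPos walk headers this way until an invalid checksum or a
// truncated block marks the position where the new Pack stream is written
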
24
backend/apis/nodejs/node_modules/tar/lib/strip-absolute-path.js generated vendored Normal file
View File

@ -0,0 +1,24 @@
// unix absolute paths are also absolute on win32, so we use this for both
const { isAbsolute, parse } = require('path').win32
// returns [root, stripped]
// Note that windows will think that //x/y/z/a has a "root" of //x/y, and in
// those cases, we want to sanitize it to x/y/z/a, not z/a, so we strip /
// explicitly if it's the first character.
// drive-specific relative paths on Windows get their root stripped off even
// though they are not absolute, so `c:../foo` becomes ['c:', '../foo']
module.exports = path => {
let r = ''
let parsed = parse(path)
while (isAbsolute(path) || parsed.root) {
// windows will think that //x/y/z has a "root" of //x/y/
// but strip the //?/C:/ off of //?/C:/path
const root = path.charAt(0) === '/' && path.slice(0, 4) !== '//?/' ? '/'
: parsed.root
path = path.slice(root.length)
r += root
parsed = parse(path)
}
return [r, path]
}

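Illustrative calls matching the comments above (a sketch, not part of the vendored file):

const stripAbsolutePath = require('./strip-absolute-path.js')
stripAbsolutePath('/foo/bar')   // ['/', 'foo/bar']
stripAbsolutePath('c:../foo')   // ['c:', '../foo'] -- drive-relative root stripped
stripAbsolutePath('//x/y/z/a')  // ['//', 'x/y/z/a'] -- leading slashes stripped one at a time
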
13
backend/apis/nodejs/node_modules/tar/lib/strip-trailing-slashes.js generated vendored Normal file
View File

@ -0,0 +1,13 @@
// warning: extremely hot code path.
// This has been meticulously optimized for use
// within npm install on large package trees.
// Do not edit without careful benchmarking.
module.exports = str => {
let i = str.length - 1
let slashesStart = -1
while (i > -1 && str.charAt(i) === '/') {
slashesStart = i
i--
}
return slashesStart === -1 ? str : str.slice(0, slashesStart)
}

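Illustrative calls (a sketch, not part of the vendored file):

const stripSlash = require('./strip-trailing-slashes.js')
stripSlash('foo///')  // 'foo'
stripSlash('foo')     // 'foo' -- untouched, no allocation
stripSlash('///')     // ''   -- every character is a slash
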
44
backend/apis/nodejs/node_modules/tar/lib/types.js generated vendored Normal file
View File

@ -0,0 +1,44 @@
'use strict'
// map types from key to human-friendly name
exports.name = new Map([
['0', 'File'],
// same as File
['', 'OldFile'],
['1', 'Link'],
['2', 'SymbolicLink'],
// Devices and FIFOs aren't fully supported
// they are parsed, but skipped when unpacking
['3', 'CharacterDevice'],
['4', 'BlockDevice'],
['5', 'Directory'],
['6', 'FIFO'],
// same as File
['7', 'ContiguousFile'],
// pax headers
['g', 'GlobalExtendedHeader'],
['x', 'ExtendedHeader'],
// vendor-specific stuff
// skip
['A', 'SolarisACL'],
// like 5, but with data, which should be skipped
['D', 'GNUDumpDir'],
// metadata only, skip
['I', 'Inode'],
// data = link path of next file
['K', 'NextFileHasLongLinkpath'],
// data = path of next file
['L', 'NextFileHasLongPath'],
// skip
['M', 'ContinuationFile'],
// like L
['N', 'OldGnuLongPath'],
// skip
['S', 'SparseFile'],
// skip
['V', 'TapeVolumeHeader'],
// like x
['X', 'OldExtendedHeader'],
])
// map the other direction
exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))

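Lookups in both directions (a sketch, not part of the vendored file):

const types = require('./types.js')
types.name.get('5')          // 'Directory'
types.code.get('Directory')  // '5'
types.name.get('Z')          // undefined -- unknown type codes are not mapped
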
923
backend/apis/nodejs/node_modules/tar/lib/unpack.js generated vendored Normal file
View File

@ -0,0 +1,923 @@
'use strict'
// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
// but the path reservations are required to avoid race conditions where
// parallelized unpack ops may mess with one another, due to dependencies
// (like a Link depending on its target) or destructive operations (like
// clobbering an fs object to create one of a different type.)
const assert = require('assert')
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const mkdir = require('./mkdir.js')
const wc = require('./winchars.js')
const pathReservations = require('./path-reservations.js')
const stripAbsolutePath = require('./strip-absolute-path.js')
const normPath = require('./normalize-windows-path.js')
const stripSlash = require('./strip-trailing-slashes.js')
const normalize = require('./normalize-unicode.js')
const ONENTRY = Symbol('onEntry')
const CHECKFS = Symbol('checkFs')
const CHECKFS2 = Symbol('checkFs2')
const PRUNECACHE = Symbol('pruneCache')
const ISREUSABLE = Symbol('isReusable')
const MAKEFS = Symbol('makeFs')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const LINK = Symbol('link')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const UNSUPPORTED = Symbol('unsupported')
const CHECKPATH = Symbol('checkPath')
const MKDIR = Symbol('mkdir')
const ONERROR = Symbol('onError')
const PENDING = Symbol('pending')
const PEND = Symbol('pend')
const UNPEND = Symbol('unpend')
const ENDED = Symbol('ended')
const MAYBECLOSE = Symbol('maybeClose')
const SKIP = Symbol('skip')
const DOCHOWN = Symbol('doChown')
const UID = Symbol('uid')
const GID = Symbol('gid')
const CHECKED_CWD = Symbol('checkedCwd')
const crypto = require('crypto')
const getFlag = require('./get-write-flag.js')
const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
const isWindows = platform === 'win32'
const DEFAULT_MAX_DEPTH = 1024
// Unlinks on Windows are not atomic.
//
// This means that if you have a file entry, followed by another
// file entry with an identical name, and you cannot re-use the file
// (because it's a hardlink, or because unlink:true is set, or it's
// Windows, which does not have useful nlink values), then the unlink
// will be committed to the disk AFTER the new file has been written
// over the old one, deleting the new file.
//
// To work around this, on Windows systems, we rename the file and then
// delete the renamed file. It's a sloppy kludge, but frankly, I do not
// know of a better way to do this, given windows' non-atomic unlink
// semantics.
//
// See: https://github.com/npm/node-tar/issues/183
/* istanbul ignore next */
const unlinkFile = (path, cb) => {
if (!isWindows) {
return fs.unlink(path, cb)
}
const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
fs.rename(path, name, er => {
if (er) {
return cb(er)
}
fs.unlink(name, cb)
})
}
/* istanbul ignore next */
const unlinkFileSync = path => {
if (!isWindows) {
return fs.unlinkSync(path)
}
const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
fs.renameSync(path, name)
fs.unlinkSync(name)
}
// called as uint32(this.uid, entry.uid, this.processUid), and likewise for gid
const uint32 = (a, b, c) =>
a === a >>> 0 ? a
: b === b >>> 0 ? b
: c
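// illustrative: uint32(undefined, 1000, 501) -> 1000 (the entry's id wins when
// ours is unset); uint32(-1, -1, 501) -> 501 (negatives are not valid uint32s)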
// clear the cache if it's a case-insensitive unicode-squashing match.
// we can't know if the current file system is case-sensitive or supports
// unicode fully, so we check for similarity on the maximally compatible
// representation. Err on the side of pruning, since all it's doing is
// preventing lstats, and it's not the end of the world if we get a false
// positive.
// Note that on windows, we always drop the entire cache whenever a
// symbolic link is encountered, because 8.3 filenames are impossible
// to reason about, and collisions are hazards rather than just failures.
const cacheKeyNormalize = path => stripSlash(normPath(normalize(path)))
.toLowerCase()
const pruneCache = (cache, abs) => {
abs = cacheKeyNormalize(abs)
for (const path of cache.keys()) {
const pnorm = cacheKeyNormalize(path)
if (pnorm === abs || pnorm.indexOf(abs + '/') === 0) {
cache.delete(path)
}
}
}
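// illustrative: cacheKeyNormalize('FOO/Bar/') and cacheKeyNormalize('foo/bar')
// both yield 'foo/bar', so pruning one path evicts the other's entry as well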
const dropCache = cache => {
for (const key of cache.keys()) {
cache.delete(key)
}
}
class Unpack extends Parser {
constructor (opt) {
if (!opt) {
opt = {}
}
opt.ondone = _ => {
this[ENDED] = true
this[MAYBECLOSE]()
}
super(opt)
this[CHECKED_CWD] = false
this.reservations = pathReservations()
this.transform = typeof opt.transform === 'function' ? opt.transform : null
this.writable = true
this.readable = false
this[PENDING] = 0
this[ENDED] = false
this.dirCache = opt.dirCache || new Map()
if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
// need both or neither
if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number') {
throw new TypeError('cannot set owner without number uid and gid')
}
if (opt.preserveOwner) {
throw new TypeError(
'cannot preserve owner in archive and also set owner explicitly')
}
this.uid = opt.uid
this.gid = opt.gid
this.setOwner = true
} else {
this.uid = null
this.gid = null
this.setOwner = false
}
// default true for root
if (opt.preserveOwner === undefined && typeof opt.uid !== 'number') {
this.preserveOwner = process.getuid && process.getuid() === 0
} else {
this.preserveOwner = !!opt.preserveOwner
}
this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
process.getuid() : null
this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
process.getgid() : null
// prevent excessively deep nesting of subfolders
// set to `Infinity` to remove this restriction
this.maxDepth = typeof opt.maxDepth === 'number'
? opt.maxDepth
: DEFAULT_MAX_DEPTH
// mostly just for testing, but useful in some cases.
// Forcibly trigger a chown on every entry, no matter what
this.forceChown = opt.forceChown === true
// turn ><?| in filenames into 0xf000-higher encoded forms
this.win32 = !!opt.win32 || isWindows
// do not unpack over files that are newer than what's in the archive
this.newer = !!opt.newer
// do not unpack over ANY files
this.keep = !!opt.keep
// do not set mtime/atime of extracted entries
this.noMtime = !!opt.noMtime
// allow .., absolute path entries, and unpacking through symlinks
// without this, warn and skip .., relativize absolutes, and error
// on symlinks in extraction path
this.preservePaths = !!opt.preservePaths
// unlink files and links before writing. This breaks existing hard
// links, and removes symlink directories rather than erroring
this.unlink = !!opt.unlink
this.cwd = normPath(path.resolve(opt.cwd || process.cwd()))
this.strip = +opt.strip || 0
// if we're not chmodding, then we don't need the process umask
this.processUmask = opt.noChmod ? 0 : process.umask()
this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
// default mode for dirs created as parents
this.dmode = opt.dmode || (0o0777 & (~this.umask))
this.fmode = opt.fmode || (0o0666 & (~this.umask))
this.on('entry', entry => this[ONENTRY](entry))
}
// a bad or damaged archive is a warning for Parser, but an error
// when extracting. Mark those errors as unrecoverable, because
// the Unpack contract cannot be met.
warn (code, msg, data = {}) {
if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT') {
data.recoverable = false
}
return super.warn(code, msg, data)
}
[MAYBECLOSE] () {
if (this[ENDED] && this[PENDING] === 0) {
this.emit('prefinish')
this.emit('finish')
this.emit('end')
}
}
[CHECKPATH] (entry) {
const p = normPath(entry.path)
const parts = p.split('/')
if (this.strip) {
if (parts.length < this.strip) {
return false
}
if (entry.type === 'Link') {
const linkparts = normPath(entry.linkpath).split('/')
if (linkparts.length >= this.strip) {
entry.linkpath = linkparts.slice(this.strip).join('/')
} else {
return false
}
}
parts.splice(0, this.strip)
entry.path = parts.join('/')
}
if (isFinite(this.maxDepth) && parts.length > this.maxDepth) {
this.warn('TAR_ENTRY_ERROR', 'path excessively deep', {
entry,
path: p,
depth: parts.length,
maxDepth: this.maxDepth,
})
return false
}
if (!this.preservePaths) {
if (parts.includes('..') || isWindows && /^[a-z]:\.\.$/i.test(parts[0])) {
this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
entry,
path: p,
})
return false
}
// strip off the root
const [root, stripped] = stripAbsolutePath(p)
if (root) {
entry.path = stripped
this.warn('TAR_ENTRY_INFO', `stripping ${root} from absolute path`, {
entry,
path: p,
})
}
}
if (path.isAbsolute(entry.path)) {
entry.absolute = normPath(path.resolve(entry.path))
} else {
entry.absolute = normPath(path.resolve(this.cwd, entry.path))
}
// if we somehow ended up with a path that escapes the cwd, and we are
// not in preservePaths mode, then something is fishy! This should have
// been prevented above, so ignore this for coverage.
/* istanbul ignore if - defense in depth */
if (!this.preservePaths &&
entry.absolute.indexOf(this.cwd + '/') !== 0 &&
entry.absolute !== this.cwd) {
this.warn('TAR_ENTRY_ERROR', 'path escaped extraction target', {
entry,
path: normPath(entry.path),
resolvedPath: entry.absolute,
cwd: this.cwd,
})
return false
}
// an archive can set properties on the extraction directory, but it
// may not replace the cwd with a different kind of thing entirely.
if (entry.absolute === this.cwd &&
entry.type !== 'Directory' &&
entry.type !== 'GNUDumpDir') {
return false
}
// only encode : chars that aren't drive letter indicators
if (this.win32) {
const { root: aRoot } = path.win32.parse(entry.absolute)
entry.absolute = aRoot + wc.encode(entry.absolute.slice(aRoot.length))
const { root: pRoot } = path.win32.parse(entry.path)
entry.path = pRoot + wc.encode(entry.path.slice(pRoot.length))
}
return true
}
[ONENTRY] (entry) {
if (!this[CHECKPATH](entry)) {
return entry.resume()
}
assert.equal(typeof entry.absolute, 'string')
switch (entry.type) {
case 'Directory':
case 'GNUDumpDir':
if (entry.mode) {
entry.mode = entry.mode | 0o700
}
// eslint-disable-next-line no-fallthrough
case 'File':
case 'OldFile':
case 'ContiguousFile':
case 'Link':
case 'SymbolicLink':
return this[CHECKFS](entry)
case 'CharacterDevice':
case 'BlockDevice':
case 'FIFO':
default:
return this[UNSUPPORTED](entry)
}
}
[ONERROR] (er, entry) {
// Cwd has to exist, or else nothing works. That's serious.
// Other errors are warnings, which raise the error in strict
// mode, but otherwise continue on.
if (er.name === 'CwdError') {
this.emit('error', er)
} else {
this.warn('TAR_ENTRY_ERROR', er, { entry })
this[UNPEND]()
entry.resume()
}
}
[MKDIR] (dir, mode, cb) {
mkdir(normPath(dir), {
uid: this.uid,
gid: this.gid,
processUid: this.processUid,
processGid: this.processGid,
umask: this.processUmask,
preserve: this.preservePaths,
unlink: this.unlink,
cache: this.dirCache,
cwd: this.cwd,
mode: mode,
noChmod: this.noChmod,
}, cb)
}
[DOCHOWN] (entry) {
// in preserve owner mode, chown if the entry doesn't match process
// in set owner mode, chown if setting doesn't match process
return this.forceChown ||
this.preserveOwner &&
(typeof entry.uid === 'number' && entry.uid !== this.processUid ||
typeof entry.gid === 'number' && entry.gid !== this.processGid)
||
(typeof this.uid === 'number' && this.uid !== this.processUid ||
typeof this.gid === 'number' && this.gid !== this.processGid)
}
[UID] (entry) {
return uint32(this.uid, entry.uid, this.processUid)
}
[GID] (entry) {
return uint32(this.gid, entry.gid, this.processGid)
}
[FILE] (entry, fullyDone) {
const mode = entry.mode & 0o7777 || this.fmode
const stream = new fsm.WriteStream(entry.absolute, {
flags: getFlag(entry.size),
mode: mode,
autoClose: false,
})
stream.on('error', er => {
if (stream.fd) {
fs.close(stream.fd, () => {})
}
// flush all the data out so that we aren't left hanging
// if the error wasn't actually fatal. otherwise the parse
// is blocked, and we never proceed.
stream.write = () => true
this[ONERROR](er, entry)
fullyDone()
})
let actions = 1
const done = er => {
if (er) {
/* istanbul ignore else - we should always have a fd by now */
if (stream.fd) {
fs.close(stream.fd, () => {})
}
this[ONERROR](er, entry)
fullyDone()
return
}
if (--actions === 0) {
fs.close(stream.fd, er => {
if (er) {
this[ONERROR](er, entry)
} else {
this[UNPEND]()
}
fullyDone()
})
}
}
stream.on('finish', _ => {
// if futimes fails, try utimes
// if utimes fails, fail with the original error
// same for fchown/chown
const abs = entry.absolute
const fd = stream.fd
if (entry.mtime && !this.noMtime) {
actions++
const atime = entry.atime || new Date()
const mtime = entry.mtime
fs.futimes(fd, atime, mtime, er =>
er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
: done())
}
if (this[DOCHOWN](entry)) {
actions++
const uid = this[UID](entry)
const gid = this[GID](entry)
fs.fchown(fd, uid, gid, er =>
er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
: done())
}
done()
})
const tx = this.transform ? this.transform(entry) || entry : entry
if (tx !== entry) {
tx.on('error', er => {
this[ONERROR](er, entry)
fullyDone()
})
entry.pipe(tx)
}
tx.pipe(stream)
}
[DIRECTORY] (entry, fullyDone) {
const mode = entry.mode & 0o7777 || this.dmode
this[MKDIR](entry.absolute, mode, er => {
if (er) {
this[ONERROR](er, entry)
fullyDone()
return
}
let actions = 1
const done = _ => {
if (--actions === 0) {
fullyDone()
this[UNPEND]()
entry.resume()
}
}
if (entry.mtime && !this.noMtime) {
actions++
fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
}
if (this[DOCHOWN](entry)) {
actions++
fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
}
done()
})
}
[UNSUPPORTED] (entry) {
entry.unsupported = true
this.warn('TAR_ENTRY_UNSUPPORTED',
`unsupported entry type: ${entry.type}`, { entry })
entry.resume()
}
[SYMLINK] (entry, done) {
this[LINK](entry, entry.linkpath, 'symlink', done)
}
[HARDLINK] (entry, done) {
const linkpath = normPath(path.resolve(this.cwd, entry.linkpath))
this[LINK](entry, linkpath, 'link', done)
}
[PEND] () {
this[PENDING]++
}
[UNPEND] () {
this[PENDING]--
this[MAYBECLOSE]()
}
[SKIP] (entry) {
this[UNPEND]()
entry.resume()
}
// Check if we can reuse an existing filesystem entry safely and
// overwrite it, rather than unlinking and recreating
// Windows doesn't report a useful nlink, so we just never reuse entries
[ISREUSABLE] (entry, st) {
return entry.type === 'File' &&
!this.unlink &&
st.isFile() &&
st.nlink <= 1 &&
!isWindows
}
// check if a thing is there, and if so, try to clobber it
[CHECKFS] (entry) {
this[PEND]()
const paths = [entry.path]
if (entry.linkpath) {
paths.push(entry.linkpath)
}
this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
}
[PRUNECACHE] (entry) {
// if we are not creating a directory, and the path is in the dirCache,
// then that means we are about to delete the directory we created
// previously, and it is no longer going to be a directory, and neither
// is any of its children.
// If a symbolic link is encountered, all bets are off. There is no
// reasonable way to sanitize the cache in such a way we will be able to
// avoid having filesystem collisions. If this happens with a non-symlink
// entry, it'll just fail to unpack, but a symlink to a directory, using an
// 8.3 shortname or certain unicode attacks, can evade detection and lead
// to arbitrary writes to anywhere on the system.
if (entry.type === 'SymbolicLink') {
dropCache(this.dirCache)
} else if (entry.type !== 'Directory') {
pruneCache(this.dirCache, entry.absolute)
}
}
[CHECKFS2] (entry, fullyDone) {
this[PRUNECACHE](entry)
const done = er => {
this[PRUNECACHE](entry)
fullyDone(er)
}
const checkCwd = () => {
this[MKDIR](this.cwd, this.dmode, er => {
if (er) {
this[ONERROR](er, entry)
done()
return
}
this[CHECKED_CWD] = true
start()
})
}
const start = () => {
if (entry.absolute !== this.cwd) {
const parent = normPath(path.dirname(entry.absolute))
if (parent !== this.cwd) {
return this[MKDIR](parent, this.dmode, er => {
if (er) {
this[ONERROR](er, entry)
done()
return
}
afterMakeParent()
})
}
}
afterMakeParent()
}
const afterMakeParent = () => {
fs.lstat(entry.absolute, (lstatEr, st) => {
if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
this[SKIP](entry)
done()
return
}
if (lstatEr || this[ISREUSABLE](entry, st)) {
return this[MAKEFS](null, entry, done)
}
if (st.isDirectory()) {
if (entry.type === 'Directory') {
const needChmod = !this.noChmod &&
entry.mode &&
(st.mode & 0o7777) !== entry.mode
const afterChmod = er => this[MAKEFS](er, entry, done)
if (!needChmod) {
return afterChmod()
}
return fs.chmod(entry.absolute, entry.mode, afterChmod)
}
// Not a dir entry, have to remove it.
// NB: the only way to end up with an entry that is the cwd
// itself, in such a way that == does not detect, is a
// tricky windows absolute path with UNC or 8.3 parts (and
// preservePaths:true, or else it will have been stripped).
// In that case, the user has opted out of path protections
// explicitly, so if they blow away the cwd, c'est la vie.
if (entry.absolute !== this.cwd) {
return fs.rmdir(entry.absolute, er =>
this[MAKEFS](er, entry, done))
}
}
// not a dir, and not reusable
// don't remove if the cwd, we want that error
if (entry.absolute === this.cwd) {
return this[MAKEFS](null, entry, done)
}
unlinkFile(entry.absolute, er =>
this[MAKEFS](er, entry, done))
})
}
if (this[CHECKED_CWD]) {
start()
} else {
checkCwd()
}
}
[MAKEFS] (er, entry, done) {
if (er) {
this[ONERROR](er, entry)
done()
return
}
switch (entry.type) {
case 'File':
case 'OldFile':
case 'ContiguousFile':
return this[FILE](entry, done)
case 'Link':
return this[HARDLINK](entry, done)
case 'SymbolicLink':
return this[SYMLINK](entry, done)
case 'Directory':
case 'GNUDumpDir':
return this[DIRECTORY](entry, done)
}
}
[LINK] (entry, linkpath, link, done) {
// XXX: get the type ('symlink' or 'junction') for windows
fs[link](linkpath, entry.absolute, er => {
if (er) {
this[ONERROR](er, entry)
} else {
this[UNPEND]()
entry.resume()
}
done()
})
}
}
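// run fn and return [error, result], so the sync subclass below can mirror
// the (er, value) shape of the async callback paths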
const callSync = fn => {
try {
return [null, fn()]
} catch (er) {
return [er, null]
}
}
class UnpackSync extends Unpack {
[MAKEFS] (er, entry) {
return super[MAKEFS](er, entry, () => {})
}
[CHECKFS] (entry) {
this[PRUNECACHE](entry)
if (!this[CHECKED_CWD]) {
const er = this[MKDIR](this.cwd, this.dmode)
if (er) {
return this[ONERROR](er, entry)
}
this[CHECKED_CWD] = true
}
// don't bother to make the parent if the current entry is the cwd,
// we've already checked it.
if (entry.absolute !== this.cwd) {
const parent = normPath(path.dirname(entry.absolute))
if (parent !== this.cwd) {
const mkParent = this[MKDIR](parent, this.dmode)
if (mkParent) {
return this[ONERROR](mkParent, entry)
}
}
}
const [lstatEr, st] = callSync(() => fs.lstatSync(entry.absolute))
if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
return this[SKIP](entry)
}
if (lstatEr || this[ISREUSABLE](entry, st)) {
return this[MAKEFS](null, entry)
}
if (st.isDirectory()) {
if (entry.type === 'Directory') {
const needChmod = !this.noChmod &&
entry.mode &&
(st.mode & 0o7777) !== entry.mode
const [er] = needChmod ? callSync(() => {
fs.chmodSync(entry.absolute, entry.mode)
}) : []
return this[MAKEFS](er, entry)
}
// not a dir entry, have to remove it
const [er] = callSync(() => fs.rmdirSync(entry.absolute))
// return here so we don't fall through and also unlink the path
return this[MAKEFS](er, entry)
}
// not a dir, and not reusable.
// don't remove if it's the cwd, since we want that error.
const [er] = entry.absolute === this.cwd ? []
: callSync(() => unlinkFileSync(entry.absolute))
this[MAKEFS](er, entry)
}
[FILE] (entry, done) {
const mode = entry.mode & 0o7777 || this.fmode
const oner = er => {
let closeError
try {
fs.closeSync(fd)
} catch (e) {
closeError = e
}
if (er || closeError) {
this[ONERROR](er || closeError, entry)
}
done()
}
let fd
try {
fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
} catch (er) {
return oner(er)
}
const tx = this.transform ? this.transform(entry) || entry : entry
if (tx !== entry) {
tx.on('error', er => this[ONERROR](er, entry))
entry.pipe(tx)
}
tx.on('data', chunk => {
try {
fs.writeSync(fd, chunk, 0, chunk.length)
} catch (er) {
oner(er)
}
})
tx.on('end', _ => {
let er = null
// try both, falling futimes back to utimes
// if either fails, handle the first error
if (entry.mtime && !this.noMtime) {
const atime = entry.atime || new Date()
const mtime = entry.mtime
try {
fs.futimesSync(fd, atime, mtime)
} catch (futimeser) {
try {
fs.utimesSync(entry.absolute, atime, mtime)
} catch (utimeser) {
er = futimeser
}
}
}
if (this[DOCHOWN](entry)) {
const uid = this[UID](entry)
const gid = this[GID](entry)
try {
fs.fchownSync(fd, uid, gid)
} catch (fchowner) {
try {
fs.chownSync(entry.absolute, uid, gid)
} catch (chowner) {
er = er || fchowner
}
}
}
oner(er)
})
}
[DIRECTORY] (entry, done) {
const mode = entry.mode & 0o7777 || this.dmode
const er = this[MKDIR](entry.absolute, mode)
if (er) {
this[ONERROR](er, entry)
done()
return
}
if (entry.mtime && !this.noMtime) {
try {
fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
} catch (er) {}
}
if (this[DOCHOWN](entry)) {
try {
fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
} catch (er) {}
}
done()
entry.resume()
}
[MKDIR] (dir, mode) {
try {
return mkdir.sync(normPath(dir), {
uid: this.uid,
gid: this.gid,
processUid: this.processUid,
processGid: this.processGid,
umask: this.processUmask,
preserve: this.preservePaths,
unlink: this.unlink,
cache: this.dirCache,
cwd: this.cwd,
mode: mode,
})
} catch (er) {
return er
}
}
[LINK] (entry, linkpath, link, done) {
try {
fs[link + 'Sync'](linkpath, entry.absolute)
done()
entry.resume()
} catch (er) {
return this[ONERROR](er, entry)
}
}
}
Unpack.Sync = UnpackSync
module.exports = Unpack

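A minimal usage sketch (destination illustrative, not part of the vendored file); extract.js wires this up with file and gzip handling:

const fs = require('fs')
const Unpack = require('./unpack.js')
fs.createReadStream('archive.tar')
  .pipe(new Unpack({ cwd: '/tmp/dest', strip: 1 }))
  .on('finish', () => console.log('extracted'))
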
40
backend/apis/nodejs/node_modules/tar/lib/update.js generated vendored Normal file
View File

@ -0,0 +1,40 @@
'use strict'
// tar -u
const hlo = require('./high-level-opt.js')
const r = require('./replace.js')
// just call tar.r with the filter and mtimeCache
module.exports = (opt_, files, cb) => {
const opt = hlo(opt_)
if (!opt.file) {
throw new TypeError('file is required')
}
if (opt.gzip || opt.brotli || opt.file.endsWith('.br') || opt.file.endsWith('.tbr')) {
throw new TypeError('cannot append to compressed archives')
}
if (!files || !Array.isArray(files) || !files.length) {
throw new TypeError('no files or directories specified')
}
files = Array.from(files)
mtimeFilter(opt)
return r(opt, files, cb)
}
const mtimeFilter = opt => {
const filter = opt.filter
if (!opt.mtimeCache) {
opt.mtimeCache = new Map()
}
opt.filter = filter ? (path, stat) =>
filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
: (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
}

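A sketch of the composed filter semantics (values illustrative, not part of the vendored file):

const mtimeCache = new Map([['a.txt', new Date('2024-01-02')]])
const pass = stat => !(mtimeCache.get('a.txt') > stat.mtime)
pass({ mtime: new Date('2024-01-01') })  // false -- archived copy is newer, skip
pass({ mtime: new Date('2024-01-03') })  // true  -- disk copy is newer, re-add
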
24
backend/apis/nodejs/node_modules/tar/lib/warn-mixin.js generated vendored Normal file
View File

@ -0,0 +1,24 @@
'use strict'
module.exports = Base => class extends Base {
warn (code, message, data = {}) {
if (this.file) {
data.file = this.file
}
if (this.cwd) {
data.cwd = this.cwd
}
data.code = message instanceof Error && message.code || code
data.tarCode = code
if (!this.strict && data.recoverable !== false) {
if (message instanceof Error) {
data = Object.assign(message, data)
message = message.message
}
this.emit('warn', data.tarCode, message, data)
} else if (message instanceof Error) {
this.emit('error', Object.assign(message, data))
} else {
this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
}
}
}

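A minimal sketch of consuming the mixin (class and codes illustrative, not part of the vendored file):

const warner = require('./warn-mixin.js')
const EE = require('events')
const Warner = warner(class extends EE {})
const w = new Warner()
w.on('warn', (code, msg, data) => console.log(code, msg, data.tarCode))
w.warn('TAR_TEST', 'just a sketch')  // non-strict and recoverable, so 'warn' is emitted
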
23
backend/apis/nodejs/node_modules/tar/lib/winchars.js generated vendored Normal file
View File

@ -0,0 +1,23 @@
'use strict'
// When writing files on Windows, translate the characters to their
// 0xf000 higher-encoded versions.
const raw = [
'|',
'<',
'>',
'?',
':',
]
const win = raw.map(char =>
String.fromCharCode(0xf000 + char.charCodeAt(0)))
const toWin = new Map(raw.map((char, i) => [char, win[i]]))
const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
module.exports = {
encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s),
}

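Round-trip sketch (filenames illustrative, not part of the vendored file):

const winchars = require('./winchars.js')
winchars.encode('a<b>.txt')                   // 'a\uf03cb\uf03e.txt'
winchars.decode(winchars.encode('who?.txt'))  // 'who?.txt' -- lossless round trip
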
546
backend/apis/nodejs/node_modules/tar/lib/write-entry.js generated vendored Normal file
View File

@ -0,0 +1,546 @@
'use strict'
const { Minipass } = require('minipass')
const Pax = require('./pax.js')
const Header = require('./header.js')
const fs = require('fs')
const path = require('path')
const normPath = require('./normalize-windows-path.js')
const stripSlash = require('./strip-trailing-slashes.js')
const prefixPath = (path, prefix) => {
if (!prefix) {
return normPath(path)
}
path = normPath(path).replace(/^\.(\/|$)/, '')
return stripSlash(prefix) + '/' + path
}
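// illustrative: prefixPath('./x/y', 'pack/') -> 'pack/x/y';
// prefixPath('x/y', null) -> 'x/y' (no prefix, just slash normalization)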
const maxReadSize = 16 * 1024 * 1024
const PROCESS = Symbol('process')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const HEADER = Symbol('header')
const READ = Symbol('read')
const LSTAT = Symbol('lstat')
const ONLSTAT = Symbol('onlstat')
const ONREAD = Symbol('onread')
const ONREADLINK = Symbol('onreadlink')
const OPENFILE = Symbol('openfile')
const ONOPENFILE = Symbol('onopenfile')
const CLOSE = Symbol('close')
const MODE = Symbol('mode')
const AWAITDRAIN = Symbol('awaitDrain')
const ONDRAIN = Symbol('ondrain')
const PREFIX = Symbol('prefix')
const HAD_ERROR = Symbol('hadError')
const warner = require('./warn-mixin.js')
const winchars = require('./winchars.js')
const stripAbsolutePath = require('./strip-absolute-path.js')
const modeFix = require('./mode-fix.js')
const WriteEntry = warner(class WriteEntry extends Minipass {
constructor (p, opt) {
opt = opt || {}
super(opt)
if (typeof p !== 'string') {
throw new TypeError('path is required')
}
this.path = normPath(p)
// suppress atime, ctime, uid, gid, uname, gname
this.portable = !!opt.portable
// until node has builtin pwnam functions, this'll have to do
this.myuid = process.getuid && process.getuid() || 0
this.myuser = process.env.USER || ''
this.maxReadSize = opt.maxReadSize || maxReadSize
this.linkCache = opt.linkCache || new Map()
this.statCache = opt.statCache || new Map()
this.preservePaths = !!opt.preservePaths
this.cwd = normPath(opt.cwd || process.cwd())
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.noMtime = !!opt.noMtime
this.mtime = opt.mtime || null
this.prefix = opt.prefix ? normPath(opt.prefix) : null
this.fd = null
this.blockLen = null
this.blockRemain = null
this.buf = null
this.offset = null
this.length = null
this.pos = null
this.remain = null
if (typeof opt.onwarn === 'function') {
this.on('warn', opt.onwarn)
}
let pathWarn = false
if (!this.preservePaths) {
const [root, stripped] = stripAbsolutePath(this.path)
if (root) {
this.path = stripped
pathWarn = root
}
}
this.win32 = !!opt.win32 || process.platform === 'win32'
if (this.win32) {
// force the \ to / normalization, since we might not *actually*
// be on windows, but want \ to be considered a path separator.
this.path = winchars.decode(this.path.replace(/\\/g, '/'))
p = p.replace(/\\/g, '/')
}
this.absolute = normPath(opt.absolute || path.resolve(this.cwd, p))
if (this.path === '') {
this.path = './'
}
if (pathWarn) {
this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
entry: this,
path: pathWarn + this.path,
})
}
if (this.statCache.has(this.absolute)) {
this[ONLSTAT](this.statCache.get(this.absolute))
} else {
this[LSTAT]()
}
}
emit (ev, ...data) {
if (ev === 'error') {
this[HAD_ERROR] = true
}
return super.emit(ev, ...data)
}
[LSTAT] () {
fs.lstat(this.absolute, (er, stat) => {
if (er) {
return this.emit('error', er)
}
this[ONLSTAT](stat)
})
}
[ONLSTAT] (stat) {
this.statCache.set(this.absolute, stat)
this.stat = stat
if (!stat.isFile()) {
stat.size = 0
}
this.type = getType(stat)
this.emit('stat', stat)
this[PROCESS]()
}
[PROCESS] () {
switch (this.type) {
case 'File': return this[FILE]()
case 'Directory': return this[DIRECTORY]()
case 'SymbolicLink': return this[SYMLINK]()
// unsupported types are ignored.
default: return this.end()
}
}
[MODE] (mode) {
return modeFix(mode, this.type === 'Directory', this.portable)
}
[PREFIX] (path) {
return prefixPath(path, this.prefix)
}
[HEADER] () {
if (this.type === 'Directory' && this.portable) {
this.noMtime = true
}
this.header = new Header({
path: this[PREFIX](this.path),
// only apply the prefix to hard links.
linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
: this.linkpath,
// only the permissions and setuid/setgid/sticky bitflags
// not the higher-order bits that specify file type
mode: this[MODE](this.stat.mode),
uid: this.portable ? null : this.stat.uid,
gid: this.portable ? null : this.stat.gid,
size: this.stat.size,
mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
type: this.type,
uname: this.portable ? null :
this.stat.uid === this.myuid ? this.myuser : '',
atime: this.portable ? null : this.stat.atime,
ctime: this.portable ? null : this.stat.ctime,
})
if (this.header.encode() && !this.noPax) {
super.write(new Pax({
atime: this.portable ? null : this.header.atime,
ctime: this.portable ? null : this.header.ctime,
gid: this.portable ? null : this.header.gid,
mtime: this.noMtime ? null : this.mtime || this.header.mtime,
path: this[PREFIX](this.path),
linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
: this.linkpath,
size: this.header.size,
uid: this.portable ? null : this.header.uid,
uname: this.portable ? null : this.header.uname,
dev: this.portable ? null : this.stat.dev,
ino: this.portable ? null : this.stat.ino,
nlink: this.portable ? null : this.stat.nlink,
}).encode())
}
super.write(this.header.block)
}
[DIRECTORY] () {
if (this.path.slice(-1) !== '/') {
this.path += '/'
}
this.stat.size = 0
this[HEADER]()
this.end()
}
[SYMLINK] () {
fs.readlink(this.absolute, (er, linkpath) => {
if (er) {
return this.emit('error', er)
}
this[ONREADLINK](linkpath)
})
}
[ONREADLINK] (linkpath) {
this.linkpath = normPath(linkpath)
this[HEADER]()
this.end()
}
[HARDLINK] (linkpath) {
this.type = 'Link'
this.linkpath = normPath(path.relative(this.cwd, linkpath))
this.stat.size = 0
this[HEADER]()
this.end()
}
[FILE] () {
if (this.stat.nlink > 1) {
const linkKey = this.stat.dev + ':' + this.stat.ino
if (this.linkCache.has(linkKey)) {
const linkpath = this.linkCache.get(linkKey)
if (linkpath.indexOf(this.cwd) === 0) {
return this[HARDLINK](linkpath)
}
}
this.linkCache.set(linkKey, this.absolute)
}
this[HEADER]()
if (this.stat.size === 0) {
return this.end()
}
this[OPENFILE]()
}
[OPENFILE] () {
fs.open(this.absolute, 'r', (er, fd) => {
if (er) {
return this.emit('error', er)
}
this[ONOPENFILE](fd)
})
}
[ONOPENFILE] (fd) {
this.fd = fd
if (this[HAD_ERROR]) {
return this[CLOSE]()
}
this.blockLen = 512 * Math.ceil(this.stat.size / 512)
this.blockRemain = this.blockLen
const bufLen = Math.min(this.blockLen, this.maxReadSize)
this.buf = Buffer.allocUnsafe(bufLen)
this.offset = 0
this.pos = 0
this.remain = this.stat.size
this.length = this.buf.length
this[READ]()
}
[READ] () {
const { fd, buf, offset, length, pos } = this
fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
if (er) {
// ignoring the error from close(2) is a bad practice, but at
// this point we already have an error, don't need another one
return this[CLOSE](() => this.emit('error', er))
}
this[ONREAD](bytesRead)
})
}
[CLOSE] (cb) {
fs.close(this.fd, cb)
}
[ONREAD] (bytesRead) {
if (bytesRead <= 0 && this.remain > 0) {
const er = new Error('encountered unexpected EOF')
er.path = this.absolute
er.syscall = 'read'
er.code = 'EOF'
return this[CLOSE](() => this.emit('error', er))
}
if (bytesRead > this.remain) {
const er = new Error('did not encounter expected EOF')
er.path = this.absolute
er.syscall = 'read'
er.code = 'EOF'
return this[CLOSE](() => this.emit('error', er))
}
// null out the rest of the buffer, if we could fit the block padding.
// by the end of this loop, bytesRead and this.remain have been bumped
// up to the blockRemain level, as if we had expected a null-padded
// file and read it to the end. then both remain and blockRemain are
// decremented by bytesRead in write(), so we know we reached the
// expected EOF without any null buffer left to append.
if (bytesRead === this.remain) {
for (let i = bytesRead; i < this.length && bytesRead < this.blockRemain; i++) {
this.buf[i + this.offset] = 0
bytesRead++
this.remain++
}
}
const writeBuf = this.offset === 0 && bytesRead === this.buf.length ?
this.buf : this.buf.slice(this.offset, this.offset + bytesRead)
const flushed = this.write(writeBuf)
if (!flushed) {
this[AWAITDRAIN](() => this[ONDRAIN]())
} else {
this[ONDRAIN]()
}
}
[AWAITDRAIN] (cb) {
this.once('drain', cb)
}
write (writeBuf) {
if (this.blockRemain < writeBuf.length) {
const er = new Error('writing more data than expected')
er.path = this.absolute
return this.emit('error', er)
}
this.remain -= writeBuf.length
this.blockRemain -= writeBuf.length
this.pos += writeBuf.length
this.offset += writeBuf.length
return super.write(writeBuf)
}
[ONDRAIN] () {
if (!this.remain) {
if (this.blockRemain) {
super.write(Buffer.alloc(this.blockRemain))
}
return this[CLOSE](er => er ? this.emit('error', er) : this.end())
}
if (this.offset >= this.length) {
// if we only have a smaller bit left to read, alloc a smaller buffer
// otherwise, keep it the same length it was before.
this.buf = Buffer.allocUnsafe(Math.min(this.blockRemain, this.buf.length))
this.offset = 0
}
this.length = this.buf.length - this.offset
this[READ]()
}
})
class WriteEntrySync extends WriteEntry {
[LSTAT] () {
this[ONLSTAT](fs.lstatSync(this.absolute))
}
[SYMLINK] () {
this[ONREADLINK](fs.readlinkSync(this.absolute))
}
[OPENFILE] () {
this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
}
[READ] () {
let threw = true
try {
const { fd, buf, offset, length, pos } = this
const bytesRead = fs.readSync(fd, buf, offset, length, pos)
this[ONREAD](bytesRead)
threw = false
} finally {
// ignoring the error from close(2) is a bad practice, but at
// this point we already have an error, don't need another one
if (threw) {
try {
this[CLOSE](() => {})
} catch (er) {}
}
}
}
[AWAITDRAIN] (cb) {
cb()
}
[CLOSE] (cb) {
fs.closeSync(this.fd)
cb()
}
}
const WriteEntryTar = warner(class WriteEntryTar extends Minipass {
constructor (readEntry, opt) {
opt = opt || {}
super(opt)
this.preservePaths = !!opt.preservePaths
this.portable = !!opt.portable
this.strict = !!opt.strict
this.noPax = !!opt.noPax
this.noMtime = !!opt.noMtime
this.readEntry = readEntry
this.type = readEntry.type
if (this.type === 'Directory' && this.portable) {
this.noMtime = true
}
this.prefix = opt.prefix || null
this.path = normPath(readEntry.path)
this.mode = this[MODE](readEntry.mode)
this.uid = this.portable ? null : readEntry.uid
this.gid = this.portable ? null : readEntry.gid
this.uname = this.portable ? null : readEntry.uname
this.gname = this.portable ? null : readEntry.gname
this.size = readEntry.size
this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
this.atime = this.portable ? null : readEntry.atime
this.ctime = this.portable ? null : readEntry.ctime
this.linkpath = normPath(readEntry.linkpath)
if (typeof opt.onwarn === 'function') {
this.on('warn', opt.onwarn)
}
let pathWarn = false
if (!this.preservePaths) {
const [root, stripped] = stripAbsolutePath(this.path)
if (root) {
this.path = stripped
pathWarn = root
}
}
this.remain = readEntry.size
this.blockRemain = readEntry.startBlockSize
this.header = new Header({
path: this[PREFIX](this.path),
linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
: this.linkpath,
// only the permissions and setuid/setgid/sticky bitflags
// not the higher-order bits that specify file type
mode: this.mode,
uid: this.portable ? null : this.uid,
gid: this.portable ? null : this.gid,
size: this.size,
mtime: this.noMtime ? null : this.mtime,
type: this.type,
uname: this.portable ? null : this.uname,
atime: this.portable ? null : this.atime,
ctime: this.portable ? null : this.ctime,
})
if (pathWarn) {
this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
entry: this,
path: pathWarn + this.path,
})
}
if (this.header.encode() && !this.noPax) {
super.write(new Pax({
atime: this.portable ? null : this.atime,
ctime: this.portable ? null : this.ctime,
gid: this.portable ? null : this.gid,
mtime: this.noMtime ? null : this.mtime,
path: this[PREFIX](this.path),
linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
: this.linkpath,
size: this.size,
uid: this.portable ? null : this.uid,
uname: this.portable ? null : this.uname,
dev: this.portable ? null : this.readEntry.dev,
ino: this.portable ? null : this.readEntry.ino,
nlink: this.portable ? null : this.readEntry.nlink,
}).encode())
}
super.write(this.header.block)
readEntry.pipe(this)
}
[PREFIX] (path) {
return prefixPath(path, this.prefix)
}
[MODE] (mode) {
return modeFix(mode, this.type === 'Directory', this.portable)
}
write (data) {
const writeLen = data.length
if (writeLen > this.blockRemain) {
throw new Error('writing more to entry than is appropriate')
}
this.blockRemain -= writeLen
return super.write(data)
}
end () {
if (this.blockRemain) {
super.write(Buffer.alloc(this.blockRemain))
}
return super.end()
}
})
WriteEntry.Sync = WriteEntrySync
WriteEntry.Tar = WriteEntryTar
const getType = stat =>
stat.isFile() ? 'File'
: stat.isDirectory() ? 'Directory'
: stat.isSymbolicLink() ? 'SymbolicLink'
: 'Unsupported'
module.exports = WriteEntry
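
A minimal usage sketch (path illustrative, not part of the vendored file); Pack pipes entries like this into the archive stream:

const WriteEntry = require('./write-entry.js')
const we = new WriteEntry('some/file.txt', { cwd: process.cwd() })
we.on('data', chunk => { /* header block, body bytes, then null padding */ })
we.on('end', () => console.log('entry complete'))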