Diffstat (limited to 'sandbox/testAppNevena/Front/node_modules/tar')
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/LICENSE  15
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/README.md  1042
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/index.js  18
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/create.js  104
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/extract.js  107
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/get-write-flag.js  20
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/header.js  288
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/high-level-opt.js  29
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/large-numbers.js  99
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/list.js  132
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/mkdir.js  213
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/mode-fix.js  23
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-unicode.js  11
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-windows-path.js  8
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/pack.js  397
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/parse.js  481
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/path-reservations.js  148
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/pax.js  143
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/read-entry.js  100
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/replace.js  223
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/strip-absolute-path.js  24
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/strip-trailing-slashes.js  13
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/types.js  44
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/unpack.js  877
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/update.js  36
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/warn-mixin.js  21
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/winchars.js  23
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/lib/write-entry.js  525
-rw-r--r--  sandbox/testAppNevena/Front/node_modules/tar/package.json  59
29 files changed, 0 insertions, 5223 deletions
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/LICENSE b/sandbox/testAppNevena/Front/node_modules/tar/LICENSE
deleted file mode 100644
index 19129e31..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/LICENSE
+++ /dev/null
@@ -1,15 +0,0 @@
-The ISC License
-
-Copyright (c) Isaac Z. Schlueter and Contributors
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
-IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/README.md b/sandbox/testAppNevena/Front/node_modules/tar/README.md
deleted file mode 100644
index 42afb1aa..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/README.md
+++ /dev/null
@@ -1,1042 +0,0 @@
-# node-tar
-
-[Fast](./benchmarks) and full-featured Tar for Node.js
-
-The API is designed to mimic the behavior of `tar(1)` on unix systems.
-If you are familiar with how tar works, most of this will hopefully be
-straightforward for you. If not, then hopefully this module can teach
-you useful unix skills that may come in handy someday :)
-
-## Background
-
-A "tar file" or "tarball" is an archive of file system entries
-(directories, files, links, etc.) The name comes from "tape archive".
-If you run `man tar` on almost any Unix command line, you'll learn
-quite a bit about what it can do, and its history.
-
-Tar has 5 main top-level commands:
-
-* `c` Create an archive
-* `r` Replace entries within an archive
-* `u` Update entries within an archive (ie, replace if they're newer)
-* `t` List out the contents of an archive
-* `x` Extract an archive to disk
-
-The other flags and options modify how this top level function works.
-
-## High-Level API
-
-These 5 functions are the high-level API. All of them have a
-single-character name (for unix nerds familiar with `tar(1)`) as well
-as a long name (for everyone else).
-
-All the high-level functions take the following arguments, all three
-of which are optional and may be omitted.
-
-1. `options` - An optional object specifying various options
-2. `paths` - An array of paths to add or extract
-3. `callback` - Called when the command is completed, if async. (If
-   the command is sync, or no file is specified, providing a callback
-   throws a `TypeError`.)
-
-If the command is sync (ie, if `options.sync=true`), then the
-callback is not allowed, since the action will be completed immediately.
-
-If a `file` argument is specified, and the command is async, then a
-`Promise` is returned. In this case, a callback may also be provided;
-it is called when the command is completed.
-
-If a `file` option is not specified, then a stream is returned. For
-`create`, this is a readable stream of the generated archive. For
-`list` and `extract` this is a writable stream that an archive should
-be written into. If a file is not specified, then a callback is not
-allowed, because you're already getting a stream to work with.
-
-`replace` and `update` only work on existing archives, and so require
-a `file` argument.
-
-Sync commands without a file argument return a stream that acts on its
-input immediately in the same tick. For readable streams, this means
-that all of the data is immediately available by calling
-`stream.read()`. For writable streams, it will be acted upon as soon
-as it is provided, but this can be at any time.
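-
-For example, a minimal sketch of the three call shapes described above
-(the tarball name is hypothetical):
-
-```js
-const tar = require('tar')
-const fs = require('fs')
-
-// file + async: returns a Promise (an optional callback also works)
-tar.x({ file: 'my-tarball.tgz' }).then(() => console.log('done'))
-
-// file + sync: fully completed by the time the call returns
-tar.x({ file: 'my-tarball.tgz', sync: true })
-
-// no file: returns a stream to write the archive data into
-fs.createReadStream('my-tarball.tgz').pipe(tar.x())
-```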
-
-### Warnings and Errors
-
-Tar emits warnings and errors for recoverable and unrecoverable situations,
-respectively. In many cases, a warning only affects a single entry in an
-archive, or is simply informing you that it's modifying an entry to comply
-with the settings provided.
-
-Unrecoverable warnings will always raise an error (ie, emit `'error'` on
-streaming actions, throw for non-streaming sync actions, reject the
-returned Promise for non-streaming async operations, or call a provided
-callback with an `Error` as the first argument). Recoverable errors will
-raise an error only if `strict: true` is set in the options.
-
-Respond to (recoverable) warnings by listening to the `warn` event,
-as sketched after this list. Handlers receive 3 arguments:
-
-- `code` String. One of the error codes below. This may not match
- `data.code`, which preserves the original error code from fs and zlib.
-- `message` String. More details about the error.
-- `data` Metadata about the error. An `Error` object for errors raised by
-  fs and zlib. All fields are attached to errors raised by tar. Typically
- contains the following fields, as relevant:
- - `tarCode` The tar error code.
- - `code` Either the tar error code, or the error code set by the
- underlying system.
- - `file` The archive file being read or written.
- - `cwd` Working directory for creation and extraction operations.
- - `entry` The entry object (if it could be created) for `TAR_ENTRY_INFO`,
- `TAR_ENTRY_INVALID`, and `TAR_ENTRY_ERROR` warnings.
- - `header` The header object (if it could be created, and the entry could
- not be created) for `TAR_ENTRY_INFO` and `TAR_ENTRY_INVALID` warnings.
- - `recoverable` Boolean. If `false`, then the warning will emit an
- `error`, even in non-strict mode.
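-
-A minimal sketch of such a listener on a parse stream (archive name
-hypothetical):
-
-```js
-const tar = require('tar')
-const fs = require('fs')
-
-const parser = new tar.Parse()
-parser.on('warn', (code, message, data) => {
-  console.error(`tar warning ${code}: ${message}`)
-  if (data.recoverable === false) {
-    // an 'error' event will follow, even in non-strict mode
-  }
-})
-fs.createReadStream('my-tarball.tgz').pipe(parser)
-```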
-
-#### Error Codes
-
-* `TAR_ENTRY_INFO` An informative error indicating that an entry is being
- modified, but otherwise processed normally. For example, removing `/` or
- `C:\` from absolute paths if `preservePaths` is not set.
-
-* `TAR_ENTRY_INVALID` An indication that a given entry is not a valid tar
- archive entry, and will be skipped. This occurs when:
- - a checksum fails,
- - a `linkpath` is missing for a link type, or
- - a `linkpath` is provided for a non-link type.
-
-  If every entry in a parsed archive raises a `TAR_ENTRY_INVALID` error,
- then the archive is presumed to be unrecoverably broken, and
- `TAR_BAD_ARCHIVE` will be raised.
-
-* `TAR_ENTRY_ERROR` The entry appears to be a valid tar archive entry, but
- encountered an error which prevented it from being unpacked. This occurs
- when:
- - an unrecoverable fs error happens during unpacking,
- - an entry has `..` in the path and `preservePaths` is not set, or
- - an entry is extracting through a symbolic link, when `preservePaths` is
- not set.
-
-* `TAR_ENTRY_UNSUPPORTED` An indication that a given entry is
- a valid archive entry, but of a type that is unsupported, and so will be
- skipped in archive creation or extracting.
-
-* `TAR_ABORT` When parsing gzip-encoded archives, the parser will
-  abort the parse process and raise a warning for any zlib errors encountered.
- Aborts are considered unrecoverable for both parsing and unpacking.
-
-* `TAR_BAD_ARCHIVE` The archive file is totally hosed. This can happen for
- a number of reasons, and always occurs at the end of a parse or extract:
-
- - An entry body was truncated before seeing the full number of bytes.
- - The archive contained only invalid entries, indicating that it is
- likely not an archive, or at least, not an archive this library can
- parse.
-
- `TAR_BAD_ARCHIVE` is considered informative for parse operations, but
- unrecoverable for extraction. Note that, if encountered at the end of an
-  extraction, tar WILL still have extracted as much as it could from the
- archive, so there may be some garbage files to clean up.
-
-Errors that occur deeper in the system (ie, either the filesystem or zlib)
-will have their error codes left intact, and a `tarCode` matching one of
-the above will be added to the warning metadata or the raised error object.
-
-Errors generated by tar will have one of the above codes set as the
-`error.code` field as well, but since errors originating in zlib or fs will
-have their original codes, it's better to read `error.tarCode` if you wish
-to see how tar is handling the issue.
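-
-For example, a sketch of inspecting both codes on a rejected strict
-extraction (archive name hypothetical):
-
-```js
-tar.x({ file: 'my-tarball.tgz', strict: true }).catch(er => {
-  // er.tarCode is tar's classification; er.code may be the original
-  // code from fs or zlib (e.g. 'ENOENT' or 'Z_DATA_ERROR')
-  console.error(er.tarCode, er.code)
-})
-```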
-
-### Examples
-
-The API mimics the `tar(1)` command line functionality, with aliases
-for more human-readable option and function names. The goal is that
-if you know how to use `tar(1)` in Unix, then you know how to use
-`require('tar')` in JavaScript.
-
-To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
-
-```js
-tar.c(
- {
-    gzip: true, // or an object of zlib options
-    file: 'my-tarball.tgz'
-  },
-  ['some', 'files', 'and', 'folders']
-).then(_ => { /* tarball has been created */ })
-```
-
-To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
-
-```js
-tar.c( // or tar.create
- {
-    gzip: true // or an object of zlib options
- },
- ['some', 'files', 'and', 'folders']
-).pipe(fs.createWriteStream('my-tarball.tgz'))
-```
-
-To replicate `tar xf my-tarball.tgz` you'd do:
-
-```js
-tar.x( // or tar.extract
-  {
-    file: 'my-tarball.tgz'
-  }
-).then(_ => { /* tarball has been dumped in cwd */ })
-```
-
-To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
-
-```js
-fs.createReadStream('my-tarball.tgz').pipe(
- tar.x({
- strip: 1,
- C: 'some-dir' // alias for cwd:'some-dir', also ok
- })
-)
-```
-
-To replicate `tar tf my-tarball.tgz`, do this:
-
-```js
-tar.t({
- file: 'my-tarball.tgz',
-  onentry: entry => { /* do whatever with it */ }
-})
-```
-
-To replicate `cat my-tarball.tgz | tar t` do:
-
-```js
-fs.createReadStream('my-tarball.tgz')
- .pipe(tar.t())
-  .on('entry', entry => { /* do whatever with it */ })
-```
-
-To do anything synchronous, add `sync: true` to the options. Note
-that sync functions don't take a callback and don't return a promise.
-When the function returns, it's already done. Sync methods without a
-file argument return a sync stream, which flushes immediately. But,
-of course, it still won't be done until you `.end()` it.
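-
-A minimal sketch of a sync stream (no `file` option), assuming
-`some-dir` exists:
-
-```js
-const stream = tar.c({ sync: true }, ['some-dir'])
-const data = stream.read() // archive data is already buffered
-```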
-
-To filter entries, add `filter: <function>` to the options.
-Tar-creating methods call the filter with `filter(path, stat)`.
-Tar-reading methods (including extraction) call the filter with
-`filter(path, entry)`. The filter is called in the `this`-context of
-the `Pack` or `Unpack` stream object.
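-
-A sketch of both filter signatures (paths hypothetical):
-
-```js
-// creating: filter(path, stat)
-tar.c({ filter: (path, stat) => !path.endsWith('.log') }, ['some-dir'])
-
-// extracting: filter(path, entry)
-tar.x({
-  file: 'my-tarball.tgz',
-  filter: (path, entry) => entry.type === 'File',
-})
-```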
-
-The arguments list to `tar t` and `tar x` specify a list of filenames
-to extract or list, so they're equivalent to a filter that tests if
-the file is in the list.
-
-For those who _aren't_ fans of tar's single-character command names:
-
-```
-tar.c === tar.create
-tar.r === tar.replace (appends to archive, file is required)
-tar.u === tar.update (appends if newer, file is required)
-tar.x === tar.extract
-tar.t === tar.list
-```
-
-Keep reading for all the command descriptions and options, as well as
-the low-level API that they are built on.
-
-### tar.c(options, fileList, callback) [alias: tar.create]
-
-Create a tarball archive.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
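-
-For example (filenames hypothetical):
-
-```js
-// adds the entries of old.tar, plus a literal file named '@weird'
-tar.c({ file: 'out.tar' }, ['@old.tar', './@weird'])
-```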
-
-The following options are supported:
-
-- `file` Write the tarball archive to the specified filename. If this
- is specified, then the callback will be fired when the file has been
- written, and a promise will be returned that resolves when the file
- is written. If a filename is not specified, then a Readable Stream
- will be returned which will emit the file data. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
- will be fully written after the call to `tar.c`. If this is set,
- and a file is not provided, then the resulting stream will already
- have the data ready to `read` or `emit('data')` as soon as you
- request it.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `mode` The mode to set on the created file archive
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-The following options are mostly internal, but can be modified in some
-advanced use cases, such as re-using caches between runs (see the
-sketch after this list).
-
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `readdirCache` A Map object that caches calls to `readdir`.
-- `jobs` A number specifying how many concurrent jobs to run.
- Defaults to 4.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
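-
-A hypothetical sketch of re-using caches across two runs over the same
-tree:
-
-```js
-const statCache = new Map()
-const readdirCache = new Map()
-const linkCache = new Map()
-const caches = { statCache, readdirCache, linkCache }
-
-tar.c({ file: 'a.tar', ...caches }, ['some-dir'])
-  .then(() => tar.c({ file: 'b.tar', ...caches }, ['some-dir']))
-```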
-
-### tar.x(options, fileList, callback) [alias: tar.extract]
-
-Extract a tarball archive.
-
-The `fileList` is an array of paths to extract from the tarball. If
-no paths are provided, then all the entries are extracted.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Note that all directories that are created will be forced to be
-writable, readable, and listable by their owner, to avoid cases where
-a directory prevents extraction of child entries by virtue of its
-mode.
-
-Most extraction errors will cause a `warn` event to be emitted. If
-the `cwd` is missing, or not a directory, then the extraction will
-fail completely.
-
-The following options are supported:
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. If provided, this must exist and must be a
- directory. [Alias: `C`]
-- `file` The archive file to extract. If not specified, then a
- Writable stream is returned where the archive data should be
- written. [Alias: `f`]
-- `sync` Create files and directories synchronously.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being unpacked. Return `true` to unpack the entry from the
- archive, or `false` to skip it.
-- `newer` Set to true to keep the existing file on disk if it's newer
- than the file in the archive. [Alias: `keep-newer`,
- `keep-newer-files`]
-- `keep` Do not overwrite existing files. In particular, if a file
- appears more than once in an archive, later copies will not
- overwrite earlier copies. [Alias: `k`, `keep-existing`]
-- `preservePaths` Allow absolute paths, paths containing `..`, and
- extracting through symbolic links. By default, `/` is stripped from
- absolute paths, `..` paths are not extracted, and any file whose
- location would be modified by a symbolic link is not extracted.
- [Alias: `P`]
-- `unlink` Unlink files before creating them. Without this option,
- tar overwrites existing files, which preserves existing hardlinks.
- With this option, existing hardlinks will be broken, as will any
- symlink that would affect the location of an extracted file. [Alias:
- `U`]
-- `strip` Remove the specified number of leading path elements.
- Pathnames with fewer elements will be silently skipped. Note that
- the pathname is edited after applying the filter, but before
- security checks. [Alias: `strip-components`, `stripComponents`]
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `preserveOwner` If true, tar will set the `uid` and `gid` of
- extracted entries to the `uid` and `gid` fields in the archive.
- This defaults to true when run as root, and false otherwise. If
- false, then files and directories will be set with the owner and
- group of the user running the process. This is similar to `-p` in
- `tar(1)`, but ACLs and other system-specific data is never unpacked
- in this implementation, and modes are set by default already.
- [Alias: `p`]
-- `uid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified user id, regardless of the `uid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `gid` option.
-- `gid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified group id, regardless of the `gid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `uid` option.
-- `noMtime` Set to true to omit writing `mtime` value for extracted
- entries. [Alias: `m`, `no-mtime`]
-- `transform` Provide a function that takes an `entry` object, and
- returns a stream, or any falsey value. If a stream is provided,
- then that stream's data will be written instead of the contents of
- the archive entry. If a falsey value is provided, then the entry is
- written to disk as normal. (To exclude items from extraction, use
- the `filter` option described above.)
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-- `noChmod` Set to true to omit calling `fs.chmod()` to ensure that the
- extracted file matches the entry mode. This also suppresses the call to
- `process.umask()` to determine the default umask value, since tar will
- extract with whatever mode is provided, and let the process `umask` apply
- normally.
-
-The following options are mostly internal, but can be modified in some
-advanced use cases, such as re-using caches between runs.
-
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `umask` Filter the modes of entries like `process.umask()`.
-- `dmode` Default mode for directories
-- `fmode` Default mode for files
-- `dirCache` A Map object of which directories exist.
-- `maxMetaEntrySize` The maximum size of meta entries that is
- supported. Defaults to 1 MB.
-
-Note that using an asynchronous stream type with the `transform`
-option will cause undefined behavior in sync extractions.
-[MiniPass](http://npm.im/minipass)-based streams are designed for this
-use case.
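-
-A sketch of the `transform` option using a minipass stream. This
-assumes a minipass version that exports the stream class directly, and
-the uppercasing transform is purely illustrative:
-
-```js
-const MiniPass = require('minipass')
-
-class Upper extends MiniPass {
-  write (chunk) {
-    return super.write(chunk.toString().toUpperCase())
-  }
-}
-
-tar.x({
-  file: 'my-tarball.tgz',
-  // return a stream to replace the entry body, or a falsey value
-  // to write the entry to disk unmodified
-  transform: entry => entry.path.endsWith('.txt') ? new Upper() : null,
-})
-```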
-
-### tar.t(options, fileList, callback) [alias: tar.list]
-
-List the contents of a tarball archive.
-
-The `fileList` is an array of paths to list from the tarball. If
-no paths are provided, then all the entries are listed.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Returns an event emitter that emits `entry` events with
-`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
-events. (If you want to get actual readable entries, use the
-`tar.Parse` class instead.)
-
-The following options are supported:
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. [Alias: `C`]
-- `file` The archive file to list. If not specified, then a
- Writable stream is returned where the archive data should be
- written. [Alias: `f`]
-- `sync` Read the specified file synchronously. (This has no effect
- when a file option isn't specified, because entries are emitted as
- fast as they are parsed from the stream anyway.)
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being listed. Return `true` to emit the entry from the
- archive, or `false` to skip it.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter. This is important for when both `file` and
- `sync` are set, because it will be called synchronously.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noResume` By default, `entry` streams are resumed immediately after
- the call to `onentry`. Set `noResume: true` to suppress this
- behavior. Note that by opting into this, the stream will never
- complete until the entry data is consumed.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
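-
-For example, collecting entry paths synchronously (because `file` and
-`sync` are both set, `onentry` fires before `tar.t` returns):
-
-```js
-const paths = []
-tar.t({
-  file: 'my-tarball.tgz',
-  sync: true,
-  onentry: entry => paths.push(entry.path),
-})
-console.log(paths)
-```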
-
-### tar.u(options, fileList, callback) [alias: tar.update]
-
-Add files to an archive if they are newer than the entry already in
-the tarball archive.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
-
-The following options are supported:
-
-- `file` Required. Write the tarball archive to the specified
- filename. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
-  will be fully written after the call to `tar.u`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for adding entries to the
- archive. Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-### tar.r(options, fileList, callback) [alias: tar.replace]
-
-Add files to an existing archive. Because later entries override
-earlier entries, this effectively replaces any existing entries.
-
-The `fileList` is an array of paths to add to the tarball. Adding a
-directory also adds its children recursively.
-
-An entry in `fileList` that starts with an `@` symbol is a tar archive
-whose entries will be added. To add a file that starts with `@`,
-prepend it with `./`.
-
-The following options are supported:
-
-- `file` Required. Write the tarball archive to the specified
- filename. [Alias: `f`]
-- `sync` Act synchronously. If this is set, then any provided file
-  will be fully written after the call to `tar.r`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for adding entries to the
- archive. Defaults to `process.cwd()`. [Alias: `C`]
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()` [Alias: `z`]
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths. [Alias: `P`]
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories. [Alias: `n`]
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such. [Alias: `L`, `h`]
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
- [Alias: `m`, `no-mtime`]
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
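-
-A minimal sketch of both commands (filenames hypothetical):
-
-```js
-// append foo.js, replacing any existing entry of the same name
-tar.r({ file: 'my-tarball.tgz', sync: true }, ['foo.js'])
-
-// append foo.js only if it is newer than the entry in the archive
-tar.u({ file: 'my-tarball.tgz', sync: true }, ['foo.js'])
-```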
-
-
-## Low-Level API
-
-### class tar.Pack
-
-A readable tar stream.
-
-Has all the standard readable stream interface stuff. `'data'` and
-`'end'` events, `read()` method, `pause()` and `resume()`, etc.
-
-#### constructor(options)
-
-The following options are supported:
-
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `prefix` A path portion to prefix onto the entries in the archive.
-- `gzip` Set to any truthy value to create a gzipped archive, or an
- object with settings for `zlib.Gzip()`
-- `filter` A function that gets called with `(path, stat)` for each
- entry being added. Return `true` to add the entry to the archive,
- or `false` to omit it.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `readdirCache` A Map object that caches calls to `readdir`.
-- `jobs` A number specifying how many concurrent jobs to run.
- Defaults to 4.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 16 MB.
-- `noDirRecurse` Do not recursively archive the contents of
- directories.
-- `follow` Set to true to pack the targets of symbolic links. Without
- this option, symbolic links are archived as such.
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-- `mtime` Set to a `Date` object to force a specific `mtime` for
- everything added to the archive. Overridden by `noMtime`.
-
-#### add(path)
-
-Adds an entry to the archive. Returns the Pack stream.
-
-#### write(path)
-
-Adds an entry to the archive. Returns true if flushed.
-
-#### end()
-
-Finishes the archive.
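-
-A minimal sketch of driving a Pack stream directly (paths
-hypothetical):
-
-```js
-const pack = new tar.Pack({ cwd: 'some-dir' })
-pack.pipe(fs.createWriteStream('out.tar'))
-pack.add('a-file') // returns the Pack stream, so calls can chain
-pack.end()
-```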
-
-### class tar.Pack.Sync
-
-Synchronous version of `tar.Pack`.
-
-### class tar.Unpack
-
-A writable stream that unpacks a tar archive onto the file system.
-
-All the normal writable stream stuff is supported. `write()` and
-`end()` methods, `'drain'` events, etc.
-
-Note that all directories that are created will be forced to be
-writable, readable, and listable by their owner, to avoid cases where
-a directory prevents extraction of child entries by virtue of its
-mode.
-
-`'close'` is emitted when it's done writing stuff to the file system.
-
-Most unpack errors will cause a `warn` event to be emitted. If the
-`cwd` is missing, or not a directory, then an error will be emitted.
-
-#### constructor(options)
-
-- `cwd` Extract files relative to the specified directory. Defaults
- to `process.cwd()`. If provided, this must exist and must be a
- directory.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being unpacked. Return `true` to unpack the entry from the
- archive, or `false` to skip it.
-- `newer` Set to true to keep the existing file on disk if it's newer
- than the file in the archive.
-- `keep` Do not overwrite existing files. In particular, if a file
- appears more than once in an archive, later copies will not
- overwrite earlier copies.
-- `preservePaths` Allow absolute paths, paths containing `..`, and
- extracting through symbolic links. By default, `/` is stripped from
- absolute paths, `..` paths are not extracted, and any file whose
- location would be modified by a symbolic link is not extracted.
-- `unlink` Unlink files before creating them. Without this option,
- tar overwrites existing files, which preserves existing hardlinks.
- With this option, existing hardlinks will be broken, as will any
- symlink that would affect the location of an extracted file.
-- `strip` Remove the specified number of leading path elements.
- Pathnames with fewer elements will be silently skipped. Note that
- the pathname is edited after applying the filter, but before
- security checks.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `umask` Filter the modes of entries like `process.umask()`.
-- `dmode` Default mode for directories
-- `fmode` Default mode for files
-- `dirCache` A Map object of which directories exist.
-- `maxMetaEntrySize` The maximum size of meta entries that is
- supported. Defaults to 1 MB.
-- `preserveOwner` If true, tar will set the `uid` and `gid` of
- extracted entries to the `uid` and `gid` fields in the archive.
- This defaults to true when run as root, and false otherwise. If
- false, then files and directories will be set with the owner and
- group of the user running the process. This is similar to `-p` in
- `tar(1)`, but ACLs and other system-specific data is never unpacked
- in this implementation, and modes are set by default already.
-- `win32` True if on a windows platform. Causes behavior where
- filenames containing `<|>?` chars are converted to
- windows-compatible values while being unpacked.
-- `uid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified user id, regardless of the `uid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `gid` option.
-- `gid` Set to a number to force ownership of all extracted files and
- folders, and all implicitly created directories, to be owned by the
- specified group id, regardless of the `gid` field in the archive.
- Cannot be used along with `preserveOwner`. Requires also setting a
- `uid` option.
-- `noMtime` Set to true to omit writing `mtime` value for extracted
- entries.
-- `transform` Provide a function that takes an `entry` object, and
- returns a stream, or any falsey value. If a stream is provided,
- then that stream's data will be written instead of the contents of
- the archive entry. If a falsey value is provided, then the entry is
- written to disk as normal. (To exclude items from extraction, use
- the `filter` option described above.)
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-- `noChmod` Set to true to omit calling `fs.chmod()` to ensure that the
- extracted file matches the entry mode. This also suppresses the call to
- `process.umask()` to determine the default umask value, since tar will
- extract with whatever mode is provided, and let the process `umask` apply
- normally.
-
-### class tar.Unpack.Sync
-
-Synchronous version of `tar.Unpack`.
-
-Note that using an asynchronous stream type with the `transform`
-option will cause undefined behavior in sync unpack streams.
-[MiniPass](http://npm.im/minipass)-based streams are designed for this
-use case.
-
-### class tar.Parse
-
-A writable stream that parses a tar archive stream. All the standard
-writable stream stuff is supported.
-
-If the archive is gzipped, then tar will detect this and unzip it.
-
-Emits `'entry'` events with `tar.ReadEntry` objects, which are
-themselves readable streams that you can pipe wherever.
-
-Each `entry` will not emit until the one before it is flushed through,
-so make sure to either consume the data (with `on('data', ...)` or
-`.pipe(...)`) or throw it away with `.resume()` to keep the stream
-flowing.
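-
-A minimal sketch of consuming entries (archive name hypothetical):
-
-```js
-fs.createReadStream('my-tarball.tgz')
-  .pipe(new tar.Parse())
-  .on('entry', entry => {
-    console.log(entry.path)
-    entry.resume() // discard the body so the stream keeps flowing
-  })
-```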
-
-#### constructor(options)
-
-Returns an event emitter that emits `entry` events with
-`tar.ReadEntry` objects.
-
-The following options are supported:
-
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `filter` A function that gets called with `(path, entry)` for each
- entry being listed. Return `true` to emit the entry from the
- archive, or `false` to skip it.
-- `onentry` A function that gets called with `(entry)` for each entry
- that passes the filter.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-
-#### abort(error)
-
-Stop all parsing activities. This is called when there are zlib
-errors. It also emits an unrecoverable warning with the error provided.
-
-### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
-
-A representation of an entry that is being read out of a tar archive.
-
-It has the following fields:
-
-- `extended` The extended metadata object provided to the constructor.
-- `globalExtended` The global extended metadata object provided to the
- constructor.
-- `remain` The number of bytes remaining to be written into the
- stream.
-- `blockRemain` The number of 512-byte blocks remaining to be written
- into the stream.
-- `ignore` Whether this entry should be ignored.
-- `meta` True if this represents metadata about the next entry, false
- if it represents a filesystem object.
-- All the fields from the header, extended header, and global extended
- header are added to the ReadEntry object. So it has `path`, `type`,
- `size`, `mode`, and so on.
-
-#### constructor(header, extended, globalExtended)
-
-Create a new ReadEntry object with the specified header, extended
-header, and global extended header values.
-
-### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
-
-A representation of an entry that is being written from the file
-system into a tar archive.
-
-Emits data for the Header, and for the Pax Extended Header if one is
-required, as well as any body data.
-
-Creating a WriteEntry for a directory does not also create
-WriteEntry objects for all of the directory contents.
-
-It has the following fields:
-
-- `path` The path field that will be written to the archive. By
- default, this is also the path from the cwd to the file system
- object.
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `myuid` If supported, the uid of the user running the current
- process.
-- `myuser` The `env.USER` string if set, or `''`. Set as the entry
- `uname` field if the file's `uid` matches `this.myuid`.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 1 MB.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `absolute` The absolute path to the entry on the filesystem. By
- default, this is `path.resolve(this.cwd, this.path)`, but it can be
- overridden explicitly.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `win32` True if on a windows platform. Causes behavior where paths
- replace `\` with `/` and filenames containing the windows-compatible
- forms of `<|>?:` characters are converted to actual `<|>?:` characters
- in the archive.
-- `noPax` Suppress pax extended headers. Note that this means that
- long paths and linkpaths will be truncated, and large or negative
- numeric values may be interpreted incorrectly.
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-
-
-#### constructor(path, options)
-
-`path` is the path of the entry as it is written in the archive.
-
-The following options are supported:
-
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `maxReadSize` The maximum buffer size for `fs.read()` operations.
- Defaults to 1 MB.
-- `linkCache` A Map object containing the device and inode value for
- any file whose nlink is > 1, to identify hard links.
-- `statCache` A Map object that caches calls to `lstat`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `cwd` The current working directory for creating the archive.
- Defaults to `process.cwd()`.
-- `absolute` The absolute path to the entry on the filesystem. By
- default, this is `path.resolve(this.cwd, this.path)`, but it can be
- overridden explicitly.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `win32` True if on a windows platform. Causes behavior where paths
- replace `\` with `/`.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-- `umask` Set to restrict the modes on the entries in the archive,
- somewhat like how umask works on file creation. Defaults to
- `process.umask()` on unix systems, or `0o22` on Windows.
-
-#### warn(message, data)
-
-If strict, emit an error with the provided message.
-
-Otherwise, emit a `'warn'` event with the provided message and data.
-
-### class tar.WriteEntry.Sync
-
-Synchronous version of tar.WriteEntry
-
-### class tar.WriteEntry.Tar
-
-A version of tar.WriteEntry that gets its data from a tar.ReadEntry
-instead of from the filesystem.
-
-#### constructor(readEntry, options)
-
-`readEntry` is the entry being read out of another archive.
-
-The following options are supported:
-
-- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
- `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary for other
- time-based operations. Additionally, `mode` is set to a "reasonable
- default" for most unix systems, based on a `umask` value of `0o22`.
-- `preservePaths` Allow absolute paths. By default, `/` is stripped
- from absolute paths.
-- `strict` Treat warnings as crash-worthy errors. Default false.
-- `onwarn` A function that will get called with `(code, message, data)` for
- any warnings encountered. (See "Warnings and Errors")
-- `noMtime` Set to true to omit writing `mtime` values for entries.
- Note that this prevents using other mtime-based features like
- `tar.update` or the `keepNewer` option with the resulting tar archive.
-
-### class tar.Header
-
-A class for reading and writing header blocks.
-
-It has the following fields:
-
-- `nullBlock` True if decoding a block which is entirely composed of
- `0x00` null bytes. (Useful because tar files are terminated by
- at least 2 null blocks.)
-- `cksumValid` True if the checksum in the header is valid, false
- otherwise.
-- `needPax` True if the values, as encoded, will require a Pax
- extended header.
-- `path` The path of the entry.
-- `mode` The 4 lowest-order octal digits of the file mode. That is,
- read/write/execute permissions for world, group, and owner, and the
- setuid, setgid, and sticky bits.
-- `uid` Numeric user id of the file owner
-- `gid` Numeric group id of the file owner
-- `size` Size of the file in bytes
-- `mtime` Modified time of the file
-- `cksum` The checksum of the header. This is generated by adding all
- the bytes of the header block, treating the checksum field itself as
- all ascii space characters (that is, `0x20`).
-- `type` The human-readable name of the type of entry this represents,
- or the alphanumeric key if unknown.
-- `typeKey` The alphanumeric key for the type of entry this header
- represents.
-- `linkpath` The target of Link and SymbolicLink entries.
-- `uname` Human-readable user name of the file owner
-- `gname` Human-readable group name of the file owner
-- `devmaj` The major portion of the device number. Always `0` for
- files, directories, and links.
-- `devmin` The minor portion of the device number. Always `0` for
- files, directories, and links.
-- `atime` File access time.
-- `ctime` File change time.
-
-#### constructor(data, [offset=0])
-
-`data` is optional. It is either a Buffer that should be interpreted
-as a tar Header starting at the specified offset and continuing for
-512 bytes, or a data object of keys and values to set on the header
-object, and eventually encode as a tar Header.
-
-#### decode(block, offset)
-
-Decode the provided buffer starting at the specified offset.
-
-The buffer must contain at least 512 bytes starting at the given offset.
-
-#### set(data)
-
-Set the header fields from the provided data object.
-
-#### encode(buffer, offset)
-
-Encode the header fields into the buffer at the specified offset.
-
-Returns `this.needPax` to indicate whether a Pax Extended Header is
-required to properly encode the specified data.
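-
-A minimal sketch of a round trip through a 512-byte block (field
-values hypothetical):
-
-```js
-const buf = Buffer.alloc(512)
-const h = new tar.Header({ path: 'hello.txt', type: 'File', size: 0 })
-const needPax = h.encode(buf, 0) // false: fits in a plain header
-
-const h2 = new tar.Header(buf, 0)
-console.log(h2.path, h2.type, h2.cksumValid)
-```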
-
-### class tar.Pax
-
-An object representing a set of key-value pairs in a Pax extended
-header entry.
-
-It has the following fields. Where the same name is used, they have
-the same semantics as the tar.Header field of the same name.
-
-- `global` True if this represents a global extended header, or false
- if it is for a single entry.
-- `atime`
-- `charset`
-- `comment`
-- `ctime`
-- `gid`
-- `gname`
-- `linkpath`
-- `mtime`
-- `path`
-- `size`
-- `uid`
-- `uname`
-- `dev`
-- `ino`
-- `nlink`
-
-#### constructor(object, global)
-
-Set the fields from the provided object. `global` is a boolean that
-defaults to false.
-
-#### encode()
-
-Return a Buffer containing the header and body for the Pax extended
-header entry, or `null` if there is nothing to encode.
-
-#### encodeBody()
-
-Return a string representing the body of the pax extended header
-entry.
-
-#### encodeField(fieldName)
-
-Return a string representing the key/value encoding for the specified
-fieldName, or `''` if the field is unset.
-
-### tar.Pax.parse(string, extended, global)
-
-Return a new Pax object created by parsing the contents of the string
-provided.
-
-If the `extended` object is set, then also add the fields from that
-object. (This is necessary because multiple metadata entries can
-occur in sequence.)
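-
-For example, a sketch of encoding a path too long for a plain ustar
-header (path hypothetical):
-
-```js
-const pax = new tar.Pax({ path: 'very/'.repeat(40) + 'deep.txt' })
-const block = pax.encode() // Buffer with header and body, or null
-```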
-
-### tar.types
-
-A translation table for the `type` field in tar headers.
-
-#### tar.types.name.get(code)
-
-Get the human-readable name for a given alphanumeric code.
-
-#### tar.types.code.get(name)
-
-Get the alphanumeric code for a given human-readable name.
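-
-For example:
-
-```js
-tar.types.name.get('0') // 'File'
-tar.types.code.get('Directory') // '5'
-```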
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/index.js b/sandbox/testAppNevena/Front/node_modules/tar/index.js
deleted file mode 100644
index c9ae06e7..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/index.js
+++ /dev/null
@@ -1,18 +0,0 @@
-'use strict'
-
-// high-level commands
-exports.c = exports.create = require('./lib/create.js')
-exports.r = exports.replace = require('./lib/replace.js')
-exports.t = exports.list = require('./lib/list.js')
-exports.u = exports.update = require('./lib/update.js')
-exports.x = exports.extract = require('./lib/extract.js')
-
-// classes
-exports.Pack = require('./lib/pack.js')
-exports.Unpack = require('./lib/unpack.js')
-exports.Parse = require('./lib/parse.js')
-exports.ReadEntry = require('./lib/read-entry.js')
-exports.WriteEntry = require('./lib/write-entry.js')
-exports.Header = require('./lib/header.js')
-exports.Pax = require('./lib/pax.js')
-exports.types = require('./lib/types.js')
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/create.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/create.js
deleted file mode 100644
index d033640a..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/create.js
+++ /dev/null
@@ -1,104 +0,0 @@
-'use strict'
-
-// tar -c
-const hlo = require('./high-level-opt.js')
-
-const Pack = require('./pack.js')
-const fsm = require('fs-minipass')
-const t = require('./list.js')
-const path = require('path')
-
-module.exports = (opt_, files, cb) => {
- if (typeof files === 'function')
- cb = files
-
- if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- return opt.file && opt.sync ? createFileSync(opt, files)
- : opt.file ? createFile(opt, files, cb)
- : opt.sync ? createSync(opt, files)
- : create(opt, files)
-}
-
-const createFileSync = (opt, files) => {
- const p = new Pack.Sync(opt)
- const stream = new fsm.WriteStreamSync(opt.file, {
- mode: opt.mode || 0o666,
- })
- p.pipe(stream)
- addFilesSync(p, files)
-}
-
-const createFile = (opt, files, cb) => {
- const p = new Pack(opt)
- const stream = new fsm.WriteStream(opt.file, {
- mode: opt.mode || 0o666,
- })
- p.pipe(stream)
-
- const promise = new Promise((res, rej) => {
- stream.on('error', rej)
- stream.on('close', res)
- p.on('error', rej)
- })
-
- addFilesAsync(p, files)
-
- return cb ? promise.then(cb, cb) : promise
-}
-
-const addFilesSync = (p, files) => {
- files.forEach(file => {
- if (file.charAt(0) === '@') {
- t({
- file: path.resolve(p.cwd, file.substr(1)),
- sync: true,
- noResume: true,
- onentry: entry => p.add(entry),
- })
- } else
- p.add(file)
- })
- p.end()
-}
-
-const addFilesAsync = (p, files) => {
- while (files.length) {
- const file = files.shift()
- if (file.charAt(0) === '@') {
- return t({
- file: path.resolve(p.cwd, file.substr(1)),
- noResume: true,
- onentry: entry => p.add(entry),
- }).then(_ => addFilesAsync(p, files))
- } else
- p.add(file)
- }
- p.end()
-}
-
-const createSync = (opt, files) => {
- const p = new Pack.Sync(opt)
- addFilesSync(p, files)
- return p
-}
-
-const create = (opt, files) => {
- const p = new Pack(opt)
- addFilesAsync(p, files)
- return p
-}
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/extract.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/extract.js
deleted file mode 100644
index 98e946ec..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/extract.js
+++ /dev/null
@@ -1,107 +0,0 @@
-'use strict'
-
-// tar -x
-const hlo = require('./high-level-opt.js')
-const Unpack = require('./unpack.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-const stripSlash = require('./strip-trailing-slashes.js')
-
-module.exports = (opt_, files, cb) => {
- if (typeof opt_ === 'function')
- cb = opt_, files = null, opt_ = {}
- else if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (typeof files === 'function')
- cb = files, files = null
-
- if (!files)
- files = []
- else
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- if (files.length)
- filesFilter(opt, files)
-
- return opt.file && opt.sync ? extractFileSync(opt)
- : opt.file ? extractFile(opt, cb)
- : opt.sync ? extractSync(opt)
- : extract(opt)
-}
-
-// construct a filter that limits the file entries listed
-// include child entries if a dir is included
-const filesFilter = (opt, files) => {
- const map = new Map(files.map(f => [stripSlash(f), true]))
- const filter = opt.filter
-
- const mapHas = (file, r) => {
- const root = r || path.parse(file).root || '.'
- const ret = file === root ? false
- : map.has(file) ? map.get(file)
- : mapHas(path.dirname(file), root)
-
- map.set(file, ret)
- return ret
- }
-
- opt.filter = filter
- ? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
- : file => mapHas(stripSlash(file))
-}
-
-const extractFileSync = opt => {
- const u = new Unpack.Sync(opt)
-
- const file = opt.file
- const stat = fs.statSync(file)
- // This trades a zero-byte read() syscall for a stat
- // However, it will usually result in less memory allocation
- const readSize = opt.maxReadSize || 16 * 1024 * 1024
- const stream = new fsm.ReadStreamSync(file, {
- readSize: readSize,
- size: stat.size,
- })
- stream.pipe(u)
-}
-
-const extractFile = (opt, cb) => {
- const u = new Unpack(opt)
- const readSize = opt.maxReadSize || 16 * 1024 * 1024
-
- const file = opt.file
- const p = new Promise((resolve, reject) => {
- u.on('error', reject)
- u.on('close', resolve)
-
- // This trades a zero-byte read() syscall for a stat
- // However, it will usually result in less memory allocation
- fs.stat(file, (er, stat) => {
- if (er)
- reject(er)
- else {
- const stream = new fsm.ReadStream(file, {
- readSize: readSize,
- size: stat.size,
- })
- stream.on('error', reject)
- stream.pipe(u)
- }
- })
- })
- return cb ? p.then(cb, cb) : p
-}
-
-const extractSync = opt => new Unpack.Sync(opt)
-
-const extract = opt => new Unpack(opt)
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/get-write-flag.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/get-write-flag.js
deleted file mode 100644
index e8695999..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/get-write-flag.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Get the appropriate flag to use for creating files
-// We use fmap on Windows platforms for files less than
-// 512kb. This is a fairly low limit, but avoids making
-// things slower in some cases. Since most of what this
-// library is used for is extracting tarballs of many
-// relatively small files in npm packages and the like,
-// it can be a big boost on Windows platforms.
-// Only supported in Node v12.9.0 and above.
-const platform = process.env.__FAKE_PLATFORM__ || process.platform
-const isWindows = platform === 'win32'
-const fs = global.__FAKE_TESTING_FS__ || require('fs')
-
-/* istanbul ignore next */
-const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants
-
-const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
-const fMapLimit = 512 * 1024
-const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
-module.exports = !fMapEnabled ? () => 'w'
- : size => size < fMapLimit ? fMapFlag : 'w'
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/header.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/header.js
deleted file mode 100644
index 12950404..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/header.js
+++ /dev/null
@@ -1,288 +0,0 @@
-'use strict'
-// parse a 512-byte header block to a data object, or vice-versa
-// encode returns `true` if a pax extended header is needed, because
-// the data could not be faithfully encoded in a simple header.
-// (Also, check header.needPax to see if it needs a pax header.)
-
-const types = require('./types.js')
-const pathModule = require('path').posix
-const large = require('./large-numbers.js')
-
-const SLURP = Symbol('slurp')
-const TYPE = Symbol('type')
-
-class Header {
- constructor (data, off, ex, gex) {
- this.cksumValid = false
- this.needPax = false
- this.nullBlock = false
-
- this.block = null
- this.path = null
- this.mode = null
- this.uid = null
- this.gid = null
- this.size = null
- this.mtime = null
- this.cksum = null
- this[TYPE] = '0'
- this.linkpath = null
- this.uname = null
- this.gname = null
- this.devmaj = 0
- this.devmin = 0
- this.atime = null
- this.ctime = null
-
- if (Buffer.isBuffer(data))
- this.decode(data, off || 0, ex, gex)
- else if (data)
- this.set(data)
- }
-
- decode (buf, off, ex, gex) {
- if (!off)
- off = 0
-
- if (!buf || !(buf.length >= off + 512))
- throw new Error('need 512 bytes for header')
-
- this.path = decString(buf, off, 100)
- this.mode = decNumber(buf, off + 100, 8)
- this.uid = decNumber(buf, off + 108, 8)
- this.gid = decNumber(buf, off + 116, 8)
- this.size = decNumber(buf, off + 124, 12)
- this.mtime = decDate(buf, off + 136, 12)
- this.cksum = decNumber(buf, off + 148, 12)
-
- // if we have extended or global extended headers, apply them now
- // See https://github.com/npm/node-tar/pull/187
- this[SLURP](ex)
- this[SLURP](gex, true)
-
- // old tar versions marked dirs as a file with a trailing /
- this[TYPE] = decString(buf, off + 156, 1)
- if (this[TYPE] === '')
- this[TYPE] = '0'
- if (this[TYPE] === '0' && this.path.substr(-1) === '/')
- this[TYPE] = '5'
-
- // tar implementations sometimes incorrectly put the stat(dir).size
- // as the size in the tarball, even though Directory entries are
- // not able to have any body at all. In the very rare chance that
- // it actually DOES have a body, we weren't going to do anything with
- // it anyway, and it'll just be a warning about an invalid header.
- if (this[TYPE] === '5')
- this.size = 0
-
- this.linkpath = decString(buf, off + 157, 100)
- if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
- this.uname = decString(buf, off + 265, 32)
- this.gname = decString(buf, off + 297, 32)
- this.devmaj = decNumber(buf, off + 329, 8)
- this.devmin = decNumber(buf, off + 337, 8)
- if (buf[off + 475] !== 0) {
- // definitely a prefix, definitely >130 chars.
- const prefix = decString(buf, off + 345, 155)
- this.path = prefix + '/' + this.path
- } else {
- const prefix = decString(buf, off + 345, 130)
- if (prefix)
- this.path = prefix + '/' + this.path
- this.atime = decDate(buf, off + 476, 12)
- this.ctime = decDate(buf, off + 488, 12)
- }
- }
-
- let sum = 8 * 0x20
- for (let i = off; i < off + 148; i++)
- sum += buf[i]
-
- for (let i = off + 156; i < off + 512; i++)
- sum += buf[i]
-
- this.cksumValid = sum === this.cksum
- if (this.cksum === null && sum === 8 * 0x20)
- this.nullBlock = true
- }
-
- [SLURP] (ex, global) {
- for (const k in ex) {
- // we slurp in everything except for the path attribute in
- // a global extended header, because that's weird.
- if (ex[k] !== null && ex[k] !== undefined &&
- !(global && k === 'path'))
- this[k] = ex[k]
- }
- }
-
- encode (buf, off) {
- if (!buf) {
- buf = this.block = Buffer.alloc(512)
- off = 0
- }
-
- if (!off)
- off = 0
-
- if (!(buf.length >= off + 512))
- throw new Error('need 512 bytes for header')
-
- const prefixSize = this.ctime || this.atime ? 130 : 155
- const split = splitPrefix(this.path || '', prefixSize)
- const path = split[0]
- const prefix = split[1]
- this.needPax = split[2]
-
- this.needPax = encString(buf, off, 100, path) || this.needPax
- this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
- this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
- this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
- this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
- this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
- buf[off + 156] = this[TYPE].charCodeAt(0)
- this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
- buf.write('ustar\u000000', off + 257, 8)
- this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
- this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
- this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
- this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
- this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
- if (buf[off + 475] !== 0)
- this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
- else {
- this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
- this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
- this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
- }
-
- let sum = 8 * 0x20
- for (let i = off; i < off + 148; i++)
- sum += buf[i]
-
- for (let i = off + 156; i < off + 512; i++)
- sum += buf[i]
-
- this.cksum = sum
- encNumber(buf, off + 148, 8, this.cksum)
- this.cksumValid = true
-
- return this.needPax
- }
-
- set (data) {
- for (const i in data) {
- if (data[i] !== null && data[i] !== undefined)
- this[i] = data[i]
- }
- }
-
- get type () {
- return types.name.get(this[TYPE]) || this[TYPE]
- }
-
- get typeKey () {
- return this[TYPE]
- }
-
- set type (type) {
- if (types.code.has(type))
- this[TYPE] = types.code.get(type)
- else
- this[TYPE] = type
- }
-}
-
-const splitPrefix = (p, prefixSize) => {
- const pathSize = 100
- let pp = p
- let prefix = ''
- let ret
- const root = pathModule.parse(p).root || '.'
-
- if (Buffer.byteLength(pp) < pathSize)
- ret = [pp, prefix, false]
- else {
- // first set prefix to the dir, and path to the base
- prefix = pathModule.dirname(pp)
- pp = pathModule.basename(pp)
-
- do {
- // both fit!
- if (Buffer.byteLength(pp) <= pathSize &&
- Buffer.byteLength(prefix) <= prefixSize)
- ret = [pp, prefix, false]
-
- // prefix fits in prefix, but path doesn't fit in path
- else if (Buffer.byteLength(pp) > pathSize &&
- Buffer.byteLength(prefix) <= prefixSize)
- ret = [pp.substr(0, pathSize - 1), prefix, true]
-
- else {
- // make path take a bit from prefix
- pp = pathModule.join(pathModule.basename(prefix), pp)
- prefix = pathModule.dirname(prefix)
- }
- } while (prefix !== root && !ret)
-
- // at this point, found no resolution, just truncate
- if (!ret)
- ret = [p.substr(0, pathSize - 1), '', true]
- }
- return ret
-}
-
-const decString = (buf, off, size) =>
- buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
-
-const decDate = (buf, off, size) =>
- numToDate(decNumber(buf, off, size))
-
-const numToDate = num => num === null ? null : new Date(num * 1000)
-
-const decNumber = (buf, off, size) =>
- buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
- : decSmallNumber(buf, off, size)
-
-const nanNull = value => isNaN(value) ? null : value
-
-const decSmallNumber = (buf, off, size) =>
- nanNull(parseInt(
- buf.slice(off, off + size)
- .toString('utf8').replace(/\0.*$/, '').trim(), 8))
-
-// the maximum encodable as a null-terminated octal, by field size
-const MAXNUM = {
- 12: 0o77777777777,
- 8: 0o7777777,
-}
-
-const encNumber = (buf, off, size, number) =>
- number === null ? false :
- number > MAXNUM[size] || number < 0
- ? (large.encode(number, buf.slice(off, off + size)), true)
- : (encSmallNumber(buf, off, size, number), false)
-
-const encSmallNumber = (buf, off, size, number) =>
- buf.write(octalString(number, size), off, size, 'ascii')
-
-const octalString = (number, size) =>
- padOctal(Math.floor(number).toString(8), size)
-
-const padOctal = (string, size) =>
- (string.length === size - 1 ? string
- : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
-
-const encDate = (buf, off, size, date) =>
- date === null ? false :
- encNumber(buf, off, size, date.getTime() / 1000)
-
-// enough to fill the longest string we've got
-const NULLS = new Array(156).join('\0')
-// pad with nulls, return true if it's longer or non-ascii
-const encString = (buf, off, size, string) =>
- string === null ? false :
- (buf.write(string + NULLS, off, size, 'utf8'),
- string.length !== Buffer.byteLength(string) || string.length > size)
-
-module.exports = Header
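A round-trip sketch of the Header class above; the field values are
illustrative:

    const Header = require('./header.js')

    const h = new Header({
      path: 'foo.txt', mode: 0o644, size: 5, type: 'File', mtime: new Date(),
    })
    const needPax = h.encode() // fills h.block with a 512-byte buffer

    const h2 = new Header(h.block) // decode the same block
    console.log(h2.path, h2.size, h2.cksumValid) // 'foo.txt' 5 true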
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/high-level-opt.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/high-level-opt.js
deleted file mode 100644
index 40e44180..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/high-level-opt.js
+++ /dev/null
@@ -1,29 +0,0 @@
-'use strict'
-
-// turn tar(1) style args like `C` into the more verbose things like `cwd`
-
-const argmap = new Map([
- ['C', 'cwd'],
- ['f', 'file'],
- ['z', 'gzip'],
- ['P', 'preservePaths'],
- ['U', 'unlink'],
- ['strip-components', 'strip'],
- ['stripComponents', 'strip'],
- ['keep-newer', 'newer'],
- ['keepNewer', 'newer'],
- ['keep-newer-files', 'newer'],
- ['keepNewerFiles', 'newer'],
- ['k', 'keep'],
- ['keep-existing', 'keep'],
- ['keepExisting', 'keep'],
- ['m', 'noMtime'],
- ['no-mtime', 'noMtime'],
- ['p', 'preserveOwner'],
- ['L', 'follow'],
- ['h', 'follow'],
-])
-
-module.exports = opt => opt ? Object.keys(opt).map(k => [
- argmap.has(k) ? argmap.get(k) : k, opt[k],
-]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
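A worked example of the mapping, using keys from the table above:

    const hlo = require('./high-level-opt.js')

    hlo({ C: '/tmp', f: 'out.tar', z: true, 'strip-components': 1 })
    // => { cwd: '/tmp', file: 'out.tar', gzip: true, strip: 1 }

Unrecognized keys pass through unchanged, so the verbose option names
keep working too.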
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/large-numbers.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/large-numbers.js
deleted file mode 100644
index dd6f690b..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/large-numbers.js
+++ /dev/null
@@ -1,99 +0,0 @@
-'use strict'
-// Tar can encode large and negative numbers using a leading byte of
-// 0xff for negative, and 0x80 for positive.
-
-const encode = (num, buf) => {
- if (!Number.isSafeInteger(num))
- // The number is so large that javascript cannot represent it with integer
- // precision.
- throw Error('cannot encode number outside of javascript safe integer range')
- else if (num < 0)
- encodeNegative(num, buf)
- else
- encodePositive(num, buf)
- return buf
-}
-
-const encodePositive = (num, buf) => {
- buf[0] = 0x80
-
- for (var i = buf.length; i > 1; i--) {
- buf[i - 1] = num & 0xff
- num = Math.floor(num / 0x100)
- }
-}
-
-const encodeNegative = (num, buf) => {
- buf[0] = 0xff
- var flipped = false
- num = num * -1
- for (var i = buf.length; i > 1; i--) {
- var byte = num & 0xff
- num = Math.floor(num / 0x100)
- if (flipped)
- buf[i - 1] = onesComp(byte)
- else if (byte === 0)
- buf[i - 1] = 0
- else {
- flipped = true
- buf[i - 1] = twosComp(byte)
- }
- }
-}
-
-const parse = (buf) => {
- const pre = buf[0]
- const value = pre === 0x80 ? pos(buf.slice(1, buf.length))
- : pre === 0xff ? twos(buf)
- : null
- if (value === null)
- throw Error('invalid base256 encoding')
-
- if (!Number.isSafeInteger(value))
- // The number is so large that javascript cannot represent it with integer
- // precision.
- throw Error('parsed number outside of javascript safe integer range')
-
- return value
-}
-
-const twos = (buf) => {
- var len = buf.length
- var sum = 0
- var flipped = false
- for (var i = len - 1; i > -1; i--) {
- var byte = buf[i]
- var f
- if (flipped)
- f = onesComp(byte)
- else if (byte === 0)
- f = byte
- else {
- flipped = true
- f = twosComp(byte)
- }
- if (f !== 0)
- sum -= f * Math.pow(256, len - i - 1)
- }
- return sum
-}
-
-const pos = (buf) => {
- var len = buf.length
- var sum = 0
- for (var i = len - 1; i > -1; i--) {
- var byte = buf[i]
- if (byte !== 0)
- sum += byte * Math.pow(256, len - i - 1)
- }
- return sum
-}
-
-const onesComp = byte => (0xff ^ byte) & 0xff
-
-const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
-
-module.exports = {
- encode,
- parse,
-}
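A round-trip example: a 12-byte octal field tops out at 0o77777777777
(one less than 8 GiB), so anything larger takes the base-256 path:

    const large = require('./large-numbers.js')

    const buf = Buffer.alloc(12)
    large.encode(8589934592, buf)    // 8 GiB does not fit in octal
    console.log(buf[0].toString(16)) // '80', the positive-number marker
    console.log(large.parse(buf))    // 8589934592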
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/list.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/list.js
deleted file mode 100644
index a0c1cf2f..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/list.js
+++ /dev/null
@@ -1,132 +0,0 @@
-'use strict'
-
-// XXX: This shares a lot in common with extract.js
-// maybe some DRY opportunity here?
-
-// tar -t
-const hlo = require('./high-level-opt.js')
-const Parser = require('./parse.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-const stripSlash = require('./strip-trailing-slashes.js')
-
-module.exports = (opt_, files, cb) => {
- if (typeof opt_ === 'function')
- cb = opt_, files = null, opt_ = {}
- else if (Array.isArray(opt_))
- files = opt_, opt_ = {}
-
- if (typeof files === 'function')
- cb = files, files = null
-
- if (!files)
- files = []
- else
- files = Array.from(files)
-
- const opt = hlo(opt_)
-
- if (opt.sync && typeof cb === 'function')
- throw new TypeError('callback not supported for sync tar functions')
-
- if (!opt.file && typeof cb === 'function')
- throw new TypeError('callback only supported with file option')
-
- if (files.length)
- filesFilter(opt, files)
-
- if (!opt.noResume)
- onentryFunction(opt)
-
- return opt.file && opt.sync ? listFileSync(opt)
- : opt.file ? listFile(opt, cb)
- : list(opt)
-}
-
-const onentryFunction = opt => {
- const onentry = opt.onentry
- opt.onentry = onentry ? e => {
- onentry(e)
- e.resume()
- } : e => e.resume()
-}
-
-// construct a filter that limits the file entries listed
-// include child entries if a dir is included
-const filesFilter = (opt, files) => {
- const map = new Map(files.map(f => [stripSlash(f), true]))
- const filter = opt.filter
-
- const mapHas = (file, r) => {
- const root = r || path.parse(file).root || '.'
- const ret = file === root ? false
- : map.has(file) ? map.get(file)
- : mapHas(path.dirname(file), root)
-
- map.set(file, ret)
- return ret
- }
-
- opt.filter = filter
- ? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
- : file => mapHas(stripSlash(file))
-}
-
-const listFileSync = opt => {
- const p = list(opt)
- const file = opt.file
- let threw = true
- let fd
- try {
- const stat = fs.statSync(file)
- const readSize = opt.maxReadSize || 16 * 1024 * 1024
- if (stat.size < readSize)
- p.end(fs.readFileSync(file))
- else {
- let pos = 0
- const buf = Buffer.allocUnsafe(readSize)
- fd = fs.openSync(file, 'r')
- while (pos < stat.size) {
- const bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
- pos += bytesRead
- p.write(buf.slice(0, bytesRead))
- }
- p.end()
- }
- threw = false
- } finally {
- if (threw && fd) {
- try {
- fs.closeSync(fd)
- } catch (er) {}
- }
- }
-}
-
-const listFile = (opt, cb) => {
- const parse = new Parser(opt)
- const readSize = opt.maxReadSize || 16 * 1024 * 1024
-
- const file = opt.file
- const p = new Promise((resolve, reject) => {
- parse.on('error', reject)
- parse.on('end', resolve)
-
- fs.stat(file, (er, stat) => {
- if (er)
- reject(er)
- else {
- const stream = new fsm.ReadStream(file, {
- readSize: readSize,
- size: stat.size,
- })
- stream.on('error', reject)
- stream.pipe(parse)
- }
- })
- })
- return cb ? p.then(cb, cb) : p
-}
-
-const list = opt => new Parser(opt)
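A minimal usage sketch, assuming the package's documented `tar.t` export;
the archive name is illustrative:

    const tar = require('tar')

    tar.t({
      file: 'archive.tar',
      onentry: entry => console.log(entry.path, entry.size),
    }).then(() => console.log('done'))

Note the onentryFunction wrapper above: unless `noResume` is set, each
entry is resumed automatically after the callback, so the parse never
stalls on an unconsumed body.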
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/mkdir.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/mkdir.js
deleted file mode 100644
index a0719e6c..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/mkdir.js
+++ /dev/null
@@ -1,213 +0,0 @@
-'use strict'
-// wrapper around mkdirp for tar's needs.
-
-// TODO: This should probably be a class, not functionally
-// passing around state in a gazillion args.
-
-const mkdirp = require('mkdirp')
-const fs = require('fs')
-const path = require('path')
-const chownr = require('chownr')
-const normPath = require('./normalize-windows-path.js')
-
-class SymlinkError extends Error {
- constructor (symlink, path) {
- super('Cannot extract through symbolic link')
- this.path = path
- this.symlink = symlink
- }
-
- get name () {
- return 'SymlinkError'
- }
-}
-
-class CwdError extends Error {
- constructor (path, code) {
- super(code + ': Cannot cd into \'' + path + '\'')
- this.path = path
- this.code = code
- }
-
- get name () {
- return 'CwdError'
- }
-}
-
-const cGet = (cache, key) => cache.get(normPath(key))
-const cSet = (cache, key, val) => cache.set(normPath(key), val)
-
-const checkCwd = (dir, cb) => {
- fs.stat(dir, (er, st) => {
- if (er || !st.isDirectory())
- er = new CwdError(dir, er && er.code || 'ENOTDIR')
- cb(er)
- })
-}
-
-module.exports = (dir, opt, cb) => {
- dir = normPath(dir)
-
- // if there's any overlap between mask and mode,
- // then we'll need an explicit chmod
- const umask = opt.umask
- const mode = opt.mode | 0o0700
- const needChmod = (mode & umask) !== 0
-
- const uid = opt.uid
- const gid = opt.gid
- const doChown = typeof uid === 'number' &&
- typeof gid === 'number' &&
- (uid !== opt.processUid || gid !== opt.processGid)
-
- const preserve = opt.preserve
- const unlink = opt.unlink
- const cache = opt.cache
- const cwd = normPath(opt.cwd)
-
- const done = (er, created) => {
- if (er)
- cb(er)
- else {
- cSet(cache, dir, true)
- if (created && doChown)
- chownr(created, uid, gid, er => done(er))
- else if (needChmod)
- fs.chmod(dir, mode, cb)
- else
- cb()
- }
- }
-
- if (cache && cGet(cache, dir) === true)
- return done()
-
- if (dir === cwd)
- return checkCwd(dir, done)
-
- if (preserve)
- return mkdirp(dir, {mode}).then(made => done(null, made), done)
-
- const sub = normPath(path.relative(cwd, dir))
- const parts = sub.split('/')
- mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
-}
-
-const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
- if (!parts.length)
- return cb(null, created)
- const p = parts.shift()
- const part = normPath(path.resolve(base + '/' + p))
- if (cGet(cache, part))
- return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
-}
-
-const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
- if (er) {
- fs.lstat(part, (statEr, st) => {
- if (statEr) {
- statEr.path = statEr.path && normPath(statEr.path)
- cb(statEr)
- } else if (st.isDirectory())
- mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- else if (unlink) {
- fs.unlink(part, er => {
- if (er)
- return cb(er)
- fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
- })
- } else if (st.isSymbolicLink())
- return cb(new SymlinkError(part, part + '/' + parts.join('/')))
- else
- cb(er)
- })
- } else {
- created = created || part
- mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
- }
-}
-
-const checkCwdSync = dir => {
- let ok = false
- let code = 'ENOTDIR'
- try {
- ok = fs.statSync(dir).isDirectory()
- } catch (er) {
- code = er.code
- } finally {
- if (!ok)
- throw new CwdError(dir, code)
- }
-}
-
-module.exports.sync = (dir, opt) => {
- dir = normPath(dir)
- // if there's any overlap between mask and mode,
- // then we'll need an explicit chmod
- const umask = opt.umask
- const mode = opt.mode | 0o0700
- const needChmod = (mode & umask) !== 0
-
- const uid = opt.uid
- const gid = opt.gid
- const doChown = typeof uid === 'number' &&
- typeof gid === 'number' &&
- (uid !== opt.processUid || gid !== opt.processGid)
-
- const preserve = opt.preserve
- const unlink = opt.unlink
- const cache = opt.cache
- const cwd = normPath(opt.cwd)
-
- const done = (created) => {
- cSet(cache, dir, true)
- if (created && doChown)
- chownr.sync(created, uid, gid)
- if (needChmod)
- fs.chmodSync(dir, mode)
- }
-
- if (cache && cGet(cache, dir) === true)
- return done()
-
- if (dir === cwd) {
- checkCwdSync(cwd)
- return done()
- }
-
- if (preserve)
- return done(mkdirp.sync(dir, mode))
-
- const sub = normPath(path.relative(cwd, dir))
- const parts = sub.split('/')
- let created = null
- for (let p = parts.shift(), part = cwd;
- p && (part += '/' + p);
- p = parts.shift()) {
- part = normPath(path.resolve(part))
- if (cGet(cache, part))
- continue
-
- try {
- fs.mkdirSync(part, mode)
- created = created || part
- cSet(cache, part, true)
- } catch (er) {
- const st = fs.lstatSync(part)
- if (st.isDirectory()) {
- cSet(cache, part, true)
- continue
- } else if (unlink) {
- fs.unlinkSync(part)
- fs.mkdirSync(part, mode)
- created = created || part
- cSet(cache, part, true)
- continue
- } else if (st.isSymbolicLink())
- return new SymlinkError(part, part + '/' + parts.join('/'))
- }
- }
-
- return done(created)
-}
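A hedged sketch of a call; in practice lib/unpack.js supplies these
options from its own state, so the values here are illustrative:

    const mkdir = require('./mkdir.js')

    mkdir('/tmp/dest/a/b/c', {
      cwd: '/tmp/dest',
      mode: 0o755,
      umask: 0o022,     // (mode & umask) === 0, so no explicit chmod here
      cache: new Map(), // dir => true, shared across one unpack run
      unlink: false,
      preserve: false,
    }, er => { if (er) throw er })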
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/mode-fix.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/mode-fix.js
deleted file mode 100644
index 6a045ffc..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/mode-fix.js
+++ /dev/null
@@ -1,23 +0,0 @@
-'use strict'
-module.exports = (mode, isDir, portable) => {
- mode &= 0o7777
-
- // in portable mode, use the minimum reasonable umask
- // if this system creates files with 0o664 by default
- // (as some linux distros do), then we'll write the
- // archive with 0o644 instead. Also, don't ever create
- // a file that is not readable/writable by the owner.
- if (portable)
- mode = (mode | 0o600) & ~0o22
-
- // if dirs are readable, then they should be listable
- if (isDir) {
- if (mode & 0o400)
- mode |= 0o100
- if (mode & 0o40)
- mode |= 0o10
- if (mode & 0o4)
- mode |= 0o1
- }
- return mode
-}
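Two worked examples of the rules described in the comments above:

    const modeFix = require('./mode-fix.js')

    modeFix(0o664, false, true) // => 0o644: portable strips group/other write
    modeFix(0o400, true, false) // => 0o500: a readable dir becomes listable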
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-unicode.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-unicode.js
deleted file mode 100644
index 4aeb1d50..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-unicode.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// warning: extremely hot code path.
-// This has been meticulously optimized for use
-// within npm install on large package trees.
-// Do not edit without careful benchmarking.
-const normalizeCache = Object.create(null)
-const {hasOwnProperty} = Object.prototype
-module.exports = s => {
- if (!hasOwnProperty.call(normalizeCache, s))
- normalizeCache[s] = s.normalize('NFKD')
- return normalizeCache[s]
-}
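The practical effect is that differently composed spellings of the same
name collide in caches keyed on the normalized form, e.g.:

    const normalize = require('./normalize-unicode.js')

    // U+00E9 (precomposed) and 'e' + U+0301 (combining accent) share
    // one NFKD form:
    normalize('caf\u00e9') === normalize('cafe\u0301') // true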
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-windows-path.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-windows-path.js
deleted file mode 100644
index eb13ba01..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/normalize-windows-path.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// on windows, either \ or / are valid directory separators.
-// on unix, \ is a valid character in filenames.
-// so, on windows, and only on windows, we replace all \ chars with /,
-// so that we can use / as our one and only directory separator char.
-
-const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
-module.exports = platform !== 'win32' ? p => p
- : p => p && p.replace(/\\/g, '/')
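For example:

    const normPath = require('./normalize-windows-path.js')

    // on win32 (or with TESTING_TAR_FAKE_PLATFORM=win32):
    normPath('a\\b\\c') // => 'a/b/c'
    // on every other platform the input is returned as-is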
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/pack.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/pack.js
deleted file mode 100644
index 9522c10b..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/pack.js
+++ /dev/null
@@ -1,397 +0,0 @@
-'use strict'
-
-// A readable tar stream creator
-// Technically, this is a transform stream that you write paths into,
-// and tar format comes out of.
-// The `add()` method is like `write()` but returns this,
-// and `end()` returns `this` as well, so you can
-// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
-// You could also do something like:
-// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
-
-class PackJob {
- constructor (path, absolute) {
- this.path = path || './'
- this.absolute = absolute
- this.entry = null
- this.stat = null
- this.readdir = null
- this.pending = false
- this.ignore = false
- this.piped = false
- }
-}
-
-const MiniPass = require('minipass')
-const zlib = require('minizlib')
-const ReadEntry = require('./read-entry.js')
-const WriteEntry = require('./write-entry.js')
-const WriteEntrySync = WriteEntry.Sync
-const WriteEntryTar = WriteEntry.Tar
-const Yallist = require('yallist')
-const EOF = Buffer.alloc(1024)
-const ONSTAT = Symbol('onStat')
-const ENDED = Symbol('ended')
-const QUEUE = Symbol('queue')
-const CURRENT = Symbol('current')
-const PROCESS = Symbol('process')
-const PROCESSING = Symbol('processing')
-const PROCESSJOB = Symbol('processJob')
-const JOBS = Symbol('jobs')
-const JOBDONE = Symbol('jobDone')
-const ADDFSENTRY = Symbol('addFSEntry')
-const ADDTARENTRY = Symbol('addTarEntry')
-const STAT = Symbol('stat')
-const READDIR = Symbol('readdir')
-const ONREADDIR = Symbol('onreaddir')
-const PIPE = Symbol('pipe')
-const ENTRY = Symbol('entry')
-const ENTRYOPT = Symbol('entryOpt')
-const WRITEENTRYCLASS = Symbol('writeEntryClass')
-const WRITE = Symbol('write')
-const ONDRAIN = Symbol('ondrain')
-
-const fs = require('fs')
-const path = require('path')
-const warner = require('./warn-mixin.js')
-const normPath = require('./normalize-windows-path.js')
-
-const Pack = warner(class Pack extends MiniPass {
- constructor (opt) {
- super(opt)
- opt = opt || Object.create(null)
- this.opt = opt
- this.file = opt.file || ''
- this.cwd = opt.cwd || process.cwd()
- this.maxReadSize = opt.maxReadSize
- this.preservePaths = !!opt.preservePaths
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.prefix = normPath(opt.prefix || '')
- this.linkCache = opt.linkCache || new Map()
- this.statCache = opt.statCache || new Map()
- this.readdirCache = opt.readdirCache || new Map()
-
- this[WRITEENTRYCLASS] = WriteEntry
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- this.portable = !!opt.portable
- this.zip = null
- if (opt.gzip) {
- if (typeof opt.gzip !== 'object')
- opt.gzip = {}
- if (this.portable)
- opt.gzip.portable = true
- this.zip = new zlib.Gzip(opt.gzip)
- this.zip.on('data', chunk => super.write(chunk))
- this.zip.on('end', _ => super.end())
- this.zip.on('drain', _ => this[ONDRAIN]())
- this.on('resume', _ => this.zip.resume())
- } else
- this.on('drain', this[ONDRAIN])
-
- this.noDirRecurse = !!opt.noDirRecurse
- this.follow = !!opt.follow
- this.noMtime = !!opt.noMtime
- this.mtime = opt.mtime || null
-
- this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
-
- this[QUEUE] = new Yallist()
- this[JOBS] = 0
- this.jobs = +opt.jobs || 4
- this[PROCESSING] = false
- this[ENDED] = false
- }
-
- [WRITE] (chunk) {
- return super.write(chunk)
- }
-
- add (path) {
- this.write(path)
- return this
- }
-
- end (path) {
- if (path)
- this.write(path)
- this[ENDED] = true
- this[PROCESS]()
- return this
- }
-
- write (path) {
- if (this[ENDED])
- throw new Error('write after end')
-
- if (path instanceof ReadEntry)
- this[ADDTARENTRY](path)
- else
- this[ADDFSENTRY](path)
- return this.flowing
- }
-
- [ADDTARENTRY] (p) {
- const absolute = normPath(path.resolve(this.cwd, p.path))
- // in this case, we don't have to wait for the stat
- if (!this.filter(p.path, p))
- p.resume()
- else {
- const job = new PackJob(p.path, absolute)
- job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
- job.entry.on('end', _ => this[JOBDONE](job))
- this[JOBS] += 1
- this[QUEUE].push(job)
- }
-
- this[PROCESS]()
- }
-
- [ADDFSENTRY] (p) {
- const absolute = normPath(path.resolve(this.cwd, p))
- this[QUEUE].push(new PackJob(p, absolute))
- this[PROCESS]()
- }
-
- [STAT] (job) {
- job.pending = true
- this[JOBS] += 1
- const stat = this.follow ? 'stat' : 'lstat'
- fs[stat](job.absolute, (er, stat) => {
- job.pending = false
- this[JOBS] -= 1
- if (er)
- this.emit('error', er)
- else
- this[ONSTAT](job, stat)
- })
- }
-
- [ONSTAT] (job, stat) {
- this.statCache.set(job.absolute, stat)
- job.stat = stat
-
- // now we have the stat, we can filter it.
- if (!this.filter(job.path, stat))
- job.ignore = true
-
- this[PROCESS]()
- }
-
- [READDIR] (job) {
- job.pending = true
- this[JOBS] += 1
- fs.readdir(job.absolute, (er, entries) => {
- job.pending = false
- this[JOBS] -= 1
- if (er)
- return this.emit('error', er)
- this[ONREADDIR](job, entries)
- })
- }
-
- [ONREADDIR] (job, entries) {
- this.readdirCache.set(job.absolute, entries)
- job.readdir = entries
- this[PROCESS]()
- }
-
- [PROCESS] () {
- if (this[PROCESSING])
- return
-
- this[PROCESSING] = true
- for (let w = this[QUEUE].head;
- w !== null && this[JOBS] < this.jobs;
- w = w.next) {
- this[PROCESSJOB](w.value)
- if (w.value.ignore) {
- const p = w.next
- this[QUEUE].removeNode(w)
- w.next = p
- }
- }
-
- this[PROCESSING] = false
-
- if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
- if (this.zip)
- this.zip.end(EOF)
- else {
- super.write(EOF)
- super.end()
- }
- }
- }
-
- get [CURRENT] () {
- return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
- }
-
- [JOBDONE] (job) {
- this[QUEUE].shift()
- this[JOBS] -= 1
- this[PROCESS]()
- }
-
- [PROCESSJOB] (job) {
- if (job.pending)
- return
-
- if (job.entry) {
- if (job === this[CURRENT] && !job.piped)
- this[PIPE](job)
- return
- }
-
- if (!job.stat) {
- if (this.statCache.has(job.absolute))
- this[ONSTAT](job, this.statCache.get(job.absolute))
- else
- this[STAT](job)
- }
- if (!job.stat)
- return
-
- // filtered out!
- if (job.ignore)
- return
-
- if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
- if (this.readdirCache.has(job.absolute))
- this[ONREADDIR](job, this.readdirCache.get(job.absolute))
- else
- this[READDIR](job)
- if (!job.readdir)
- return
- }
-
- // we know it doesn't have an entry, because that got checked above
- job.entry = this[ENTRY](job)
- if (!job.entry) {
- job.ignore = true
- return
- }
-
- if (job === this[CURRENT] && !job.piped)
- this[PIPE](job)
- }
-
- [ENTRYOPT] (job) {
- return {
- onwarn: (code, msg, data) => this.warn(code, msg, data),
- noPax: this.noPax,
- cwd: this.cwd,
- absolute: job.absolute,
- preservePaths: this.preservePaths,
- maxReadSize: this.maxReadSize,
- strict: this.strict,
- portable: this.portable,
- linkCache: this.linkCache,
- statCache: this.statCache,
- noMtime: this.noMtime,
- mtime: this.mtime,
- prefix: this.prefix,
- }
- }
-
- [ENTRY] (job) {
- this[JOBS] += 1
- try {
- return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
- .on('end', () => this[JOBDONE](job))
- .on('error', er => this.emit('error', er))
- } catch (er) {
- this.emit('error', er)
- }
- }
-
- [ONDRAIN] () {
- if (this[CURRENT] && this[CURRENT].entry)
- this[CURRENT].entry.resume()
- }
-
- // like .pipe() but using super, because our write() is special
- [PIPE] (job) {
- job.piped = true
-
- if (job.readdir) {
- job.readdir.forEach(entry => {
- const p = job.path
- const base = p === './' ? '' : p.replace(/\/*$/, '/')
- this[ADDFSENTRY](base + entry)
- })
- }
-
- const source = job.entry
- const zip = this.zip
-
- if (zip) {
- source.on('data', chunk => {
- if (!zip.write(chunk))
- source.pause()
- })
- } else {
- source.on('data', chunk => {
- if (!super.write(chunk))
- source.pause()
- })
- }
- }
-
- pause () {
- if (this.zip)
- this.zip.pause()
- return super.pause()
- }
-})
-
-class PackSync extends Pack {
- constructor (opt) {
- super(opt)
- this[WRITEENTRYCLASS] = WriteEntrySync
- }
-
- // pause/resume are no-ops in sync streams.
- pause () {}
- resume () {}
-
- [STAT] (job) {
- const stat = this.follow ? 'statSync' : 'lstatSync'
- this[ONSTAT](job, fs[stat](job.absolute))
- }
-
- [READDIR] (job, stat) {
- this[ONREADDIR](job, fs.readdirSync(job.absolute))
- }
-
- // gotta get it all in this tick
- [PIPE] (job) {
- const source = job.entry
- const zip = this.zip
-
- if (job.readdir) {
- job.readdir.forEach(entry => {
- const p = job.path
- const base = p === './' ? '' : p.replace(/\/*$/, '/')
- this[ADDFSENTRY](base + entry)
- })
- }
-
- if (zip) {
- source.on('data', chunk => {
- zip.write(chunk)
- })
- } else {
- source.on('data', chunk => {
- super[WRITE](chunk)
- })
- }
- }
-}
-
-Pack.Sync = PackSync
-
-module.exports = Pack
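The chained usage promised in the header comment, as a concrete sketch
(paths are illustrative):

    const Pack = require('./pack.js')
    const fs = require('fs')

    new Pack({ cwd: '/tmp/src', gzip: true })
      .add('dir')
      .add('file.txt')
      .end()
      .pipe(fs.createWriteStream('/tmp/out.tgz'))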
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/parse.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/parse.js
deleted file mode 100644
index b1b4e7e4..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/parse.js
+++ /dev/null
@@ -1,481 +0,0 @@
-'use strict'
-
-// this[BUFFER] is the remainder of a chunk if we're waiting for
-// the full 512 bytes of a header to come in. We will Buffer.concat()
-// it to the next write(), which is a mem copy, but a small one.
-//
-// this[QUEUE] is a Yallist of entries that haven't been emitted
-// yet. This can only get filled up if the user keeps write()ing after
-// a write() returns false, or does a write() with more than one entry
-//
-// We don't buffer chunks, we always parse them and either create an
-// entry, or push it into the active entry. The ReadEntry class knows
-// to throw data away if .ignore=true
-//
-// Shift entry off the buffer when it emits 'end', and emit 'entry' for
-// the next one in the list.
-//
-// At any time, we're pushing body chunks into the entry at WRITEENTRY,
-// and waiting for 'end' on the entry at READENTRY
-//
-// ignored entries get .resume() called on them straight away
-
-const warner = require('./warn-mixin.js')
-const Header = require('./header.js')
-const EE = require('events')
-const Yallist = require('yallist')
-const maxMetaEntrySize = 1024 * 1024
-const Entry = require('./read-entry.js')
-const Pax = require('./pax.js')
-const zlib = require('minizlib')
-
-const gzipHeader = Buffer.from([0x1f, 0x8b])
-const STATE = Symbol('state')
-const WRITEENTRY = Symbol('writeEntry')
-const READENTRY = Symbol('readEntry')
-const NEXTENTRY = Symbol('nextEntry')
-const PROCESSENTRY = Symbol('processEntry')
-const EX = Symbol('extendedHeader')
-const GEX = Symbol('globalExtendedHeader')
-const META = Symbol('meta')
-const EMITMETA = Symbol('emitMeta')
-const BUFFER = Symbol('buffer')
-const QUEUE = Symbol('queue')
-const ENDED = Symbol('ended')
-const EMITTEDEND = Symbol('emittedEnd')
-const EMIT = Symbol('emit')
-const UNZIP = Symbol('unzip')
-const CONSUMECHUNK = Symbol('consumeChunk')
-const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
-const CONSUMEBODY = Symbol('consumeBody')
-const CONSUMEMETA = Symbol('consumeMeta')
-const CONSUMEHEADER = Symbol('consumeHeader')
-const CONSUMING = Symbol('consuming')
-const BUFFERCONCAT = Symbol('bufferConcat')
-const MAYBEEND = Symbol('maybeEnd')
-const WRITING = Symbol('writing')
-const ABORTED = Symbol('aborted')
-const DONE = Symbol('onDone')
-const SAW_VALID_ENTRY = Symbol('sawValidEntry')
-const SAW_NULL_BLOCK = Symbol('sawNullBlock')
-const SAW_EOF = Symbol('sawEOF')
-
-const noop = _ => true
-
-module.exports = warner(class Parser extends EE {
- constructor (opt) {
- opt = opt || {}
- super(opt)
-
- this.file = opt.file || ''
-
- // set to boolean false when an entry starts. 1024 bytes of \0
- // is technically a valid tarball, albeit a boring one.
- this[SAW_VALID_ENTRY] = null
-
- // these BADARCHIVE errors can't be detected early. listen on DONE.
- this.on(DONE, _ => {
- if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
- // either less than 1 block of data, or all entries were invalid.
- // Either way, probably not even a tarball.
- this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
- }
- })
-
- if (opt.ondone)
- this.on(DONE, opt.ondone)
- else {
- this.on(DONE, _ => {
- this.emit('prefinish')
- this.emit('finish')
- this.emit('end')
- this.emit('close')
- })
- }
-
- this.strict = !!opt.strict
- this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
- this.filter = typeof opt.filter === 'function' ? opt.filter : noop
-
- // have to set this so that streams are ok piping into it
- this.writable = true
- this.readable = false
-
- this[QUEUE] = new Yallist()
- this[BUFFER] = null
- this[READENTRY] = null
- this[WRITEENTRY] = null
- this[STATE] = 'begin'
- this[META] = ''
- this[EX] = null
- this[GEX] = null
- this[ENDED] = false
- this[UNZIP] = null
- this[ABORTED] = false
- this[SAW_NULL_BLOCK] = false
- this[SAW_EOF] = false
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
- if (typeof opt.onentry === 'function')
- this.on('entry', opt.onentry)
- }
-
- [CONSUMEHEADER] (chunk, position) {
- if (this[SAW_VALID_ENTRY] === null)
- this[SAW_VALID_ENTRY] = false
- let header
- try {
- header = new Header(chunk, position, this[EX], this[GEX])
- } catch (er) {
- return this.warn('TAR_ENTRY_INVALID', er)
- }
-
- if (header.nullBlock) {
- if (this[SAW_NULL_BLOCK]) {
- this[SAW_EOF] = true
- // ending an archive with no entries. pointless, but legal.
- if (this[STATE] === 'begin')
- this[STATE] = 'header'
- this[EMIT]('eof')
- } else {
- this[SAW_NULL_BLOCK] = true
- this[EMIT]('nullBlock')
- }
- } else {
- this[SAW_NULL_BLOCK] = false
- if (!header.cksumValid)
- this.warn('TAR_ENTRY_INVALID', 'checksum failure', {header})
- else if (!header.path)
- this.warn('TAR_ENTRY_INVALID', 'path is required', {header})
- else {
- const type = header.type
- if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
- this.warn('TAR_ENTRY_INVALID', 'linkpath required', {header})
- else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
- this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', {header})
- else {
- const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
-
- // we do this for meta & ignored entries as well, because they
- // are still valid tar, or else we wouldn't know to ignore them
- if (!this[SAW_VALID_ENTRY]) {
- if (entry.remain) {
- // this might be the one!
- const onend = () => {
- if (!entry.invalid)
- this[SAW_VALID_ENTRY] = true
- }
- entry.on('end', onend)
- } else
- this[SAW_VALID_ENTRY] = true
- }
-
- if (entry.meta) {
- if (entry.size > this.maxMetaEntrySize) {
- entry.ignore = true
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = 'ignore'
- entry.resume()
- } else if (entry.size > 0) {
- this[META] = ''
- entry.on('data', c => this[META] += c)
- this[STATE] = 'meta'
- }
- } else {
- this[EX] = null
- entry.ignore = entry.ignore || !this.filter(entry.path, entry)
-
- if (entry.ignore) {
- // probably valid, just not something we care about
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = entry.remain ? 'ignore' : 'header'
- entry.resume()
- } else {
- if (entry.remain)
- this[STATE] = 'body'
- else {
- this[STATE] = 'header'
- entry.end()
- }
-
- if (!this[READENTRY]) {
- this[QUEUE].push(entry)
- this[NEXTENTRY]()
- } else
- this[QUEUE].push(entry)
- }
- }
- }
- }
- }
- }
-
- [PROCESSENTRY] (entry) {
- let go = true
-
- if (!entry) {
- this[READENTRY] = null
- go = false
- } else if (Array.isArray(entry))
- this.emit.apply(this, entry)
- else {
- this[READENTRY] = entry
- this.emit('entry', entry)
- if (!entry.emittedEnd) {
- entry.on('end', _ => this[NEXTENTRY]())
- go = false
- }
- }
-
- return go
- }
-
- [NEXTENTRY] () {
- do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
-
- if (!this[QUEUE].length) {
- // At this point, there's nothing in the queue, but we may have an
- // entry which is being consumed (readEntry).
- // If we don't, then we definitely can handle more data.
- // If we do, and either it's flowing, or it has never had any data
- // written to it, then it needs more.
- // The only other possibility is that it has returned false from a
- // write() call, so we wait for the next drain to continue.
- const re = this[READENTRY]
- const drainNow = !re || re.flowing || re.size === re.remain
- if (drainNow) {
- if (!this[WRITING])
- this.emit('drain')
- } else
- re.once('drain', _ => this.emit('drain'))
- }
- }
-
- [CONSUMEBODY] (chunk, position) {
- // write up to but no more than writeEntry.blockRemain
- const entry = this[WRITEENTRY]
- const br = entry.blockRemain
- const c = (br >= chunk.length && position === 0) ? chunk
- : chunk.slice(position, position + br)
-
- entry.write(c)
-
- if (!entry.blockRemain) {
- this[STATE] = 'header'
- this[WRITEENTRY] = null
- entry.end()
- }
-
- return c.length
- }
-
- [CONSUMEMETA] (chunk, position) {
- const entry = this[WRITEENTRY]
- const ret = this[CONSUMEBODY](chunk, position)
-
- // if we finished, then the entry is reset
- if (!this[WRITEENTRY])
- this[EMITMETA](entry)
-
- return ret
- }
-
- [EMIT] (ev, data, extra) {
- if (!this[QUEUE].length && !this[READENTRY])
- this.emit(ev, data, extra)
- else
- this[QUEUE].push([ev, data, extra])
- }
-
- [EMITMETA] (entry) {
- this[EMIT]('meta', this[META])
- switch (entry.type) {
- case 'ExtendedHeader':
- case 'OldExtendedHeader':
- this[EX] = Pax.parse(this[META], this[EX], false)
- break
-
- case 'GlobalExtendedHeader':
- this[GEX] = Pax.parse(this[META], this[GEX], true)
- break
-
- case 'NextFileHasLongPath':
- case 'OldGnuLongPath':
- this[EX] = this[EX] || Object.create(null)
- this[EX].path = this[META].replace(/\0.*/, '')
- break
-
- case 'NextFileHasLongLinkpath':
- this[EX] = this[EX] || Object.create(null)
- this[EX].linkpath = this[META].replace(/\0.*/, '')
- break
-
- /* istanbul ignore next */
- default: throw new Error('unknown meta: ' + entry.type)
- }
- }
-
- abort (error) {
- this[ABORTED] = true
- this.emit('abort', error)
- // always throws, even in non-strict mode
- this.warn('TAR_ABORT', error, { recoverable: false })
- }
-
- write (chunk) {
- if (this[ABORTED])
- return
-
- // first write, might be gzipped
- if (this[UNZIP] === null && chunk) {
- if (this[BUFFER]) {
- chunk = Buffer.concat([this[BUFFER], chunk])
- this[BUFFER] = null
- }
- if (chunk.length < gzipHeader.length) {
- this[BUFFER] = chunk
- return true
- }
- for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
- if (chunk[i] !== gzipHeader[i])
- this[UNZIP] = false
- }
- if (this[UNZIP] === null) {
- const ended = this[ENDED]
- this[ENDED] = false
- this[UNZIP] = new zlib.Unzip()
- this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
- this[UNZIP].on('error', er => this.abort(er))
- this[UNZIP].on('end', _ => {
- this[ENDED] = true
- this[CONSUMECHUNK]()
- })
- this[WRITING] = true
- const ret = this[UNZIP][ended ? 'end' : 'write'](chunk)
- this[WRITING] = false
- return ret
- }
- }
-
- this[WRITING] = true
- if (this[UNZIP])
- this[UNZIP].write(chunk)
- else
- this[CONSUMECHUNK](chunk)
- this[WRITING] = false
-
- // return false if there's a queue, or if the current entry isn't flowing
- const ret =
- this[QUEUE].length ? false :
- this[READENTRY] ? this[READENTRY].flowing :
- true
-
- // if we have no queue, then that means a clogged READENTRY
- if (!ret && !this[QUEUE].length)
- this[READENTRY].once('drain', _ => this.emit('drain'))
-
- return ret
- }
-
- [BUFFERCONCAT] (c) {
- if (c && !this[ABORTED])
- this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
- }
-
- [MAYBEEND] () {
- if (this[ENDED] &&
- !this[EMITTEDEND] &&
- !this[ABORTED] &&
- !this[CONSUMING]) {
- this[EMITTEDEND] = true
- const entry = this[WRITEENTRY]
- if (entry && entry.blockRemain) {
- // truncated, likely a damaged file
- const have = this[BUFFER] ? this[BUFFER].length : 0
- this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
- entry.blockRemain} more bytes, only ${have} available)`, {entry})
- if (this[BUFFER])
- entry.write(this[BUFFER])
- entry.end()
- }
- this[EMIT](DONE)
- }
- }
-
- [CONSUMECHUNK] (chunk) {
- if (this[CONSUMING])
- this[BUFFERCONCAT](chunk)
- else if (!chunk && !this[BUFFER])
- this[MAYBEEND]()
- else {
- this[CONSUMING] = true
- if (this[BUFFER]) {
- this[BUFFERCONCAT](chunk)
- const c = this[BUFFER]
- this[BUFFER] = null
- this[CONSUMECHUNKSUB](c)
- } else
- this[CONSUMECHUNKSUB](chunk)
-
- while (this[BUFFER] &&
- this[BUFFER].length >= 512 &&
- !this[ABORTED] &&
- !this[SAW_EOF]) {
- const c = this[BUFFER]
- this[BUFFER] = null
- this[CONSUMECHUNKSUB](c)
- }
- this[CONSUMING] = false
- }
-
- if (!this[BUFFER] || this[ENDED])
- this[MAYBEEND]()
- }
-
- [CONSUMECHUNKSUB] (chunk) {
- // we know that we are in CONSUMING mode, so anything written goes into
- // the buffer. Advance the position and put any remainder in the buffer.
- let position = 0
- const length = chunk.length
- while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
- switch (this[STATE]) {
- case 'begin':
- case 'header':
- this[CONSUMEHEADER](chunk, position)
- position += 512
- break
-
- case 'ignore':
- case 'body':
- position += this[CONSUMEBODY](chunk, position)
- break
-
- case 'meta':
- position += this[CONSUMEMETA](chunk, position)
- break
-
- /* istanbul ignore next */
- default:
- throw new Error('invalid state: ' + this[STATE])
- }
- }
-
- if (position < length) {
- if (this[BUFFER])
- this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
- else
- this[BUFFER] = chunk.slice(position)
- }
- }
-
- end (chunk) {
- if (!this[ABORTED]) {
- if (this[UNZIP])
- this[UNZIP].end(chunk)
- else {
- this[ENDED] = true
- this.write(chunk)
- }
- }
- }
-})
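A minimal consumer sketch; the filter and filename are illustrative:

    const Parser = require('./parse.js')
    const fs = require('fs')

    const p = new Parser({
      filter: path => path.endsWith('.js'),
      onentry: entry => {
        console.log(entry.path)
        entry.resume() // entries must be consumed or the parse stalls
      },
    })
    fs.createReadStream('archive.tar').pipe(p)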
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/path-reservations.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/path-reservations.js
deleted file mode 100644
index 8183c45f..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/path-reservations.js
+++ /dev/null
@@ -1,148 +0,0 @@
-// A path exclusive reservation system
-// reserve([list, of, paths], fn)
-// When the fn is first in line for all its paths, it
-// is called with a cb that clears the reservation.
-//
-// Used by async unpack to avoid clobbering paths in use,
-// while still allowing maximal safe parallelization.
-
-const assert = require('assert')
-const normalize = require('./normalize-unicode.js')
-const stripSlashes = require('./strip-trailing-slashes.js')
-const { join } = require('path')
-
-const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
-const isWindows = platform === 'win32'
-
-module.exports = () => {
- // path => [function or Set]
- // A Set object means a directory reservation
- // A fn is a direct reservation on that path
- const queues = new Map()
-
- // fn => {paths:[path,...], dirs:[path, ...]}
- const reservations = new Map()
-
- // return a set of parent dirs for a given path
- // '/a/b/c/d' -> ['/', '/a', '/a/b', '/a/b/c']
- const getDirs = path => {
- const dirs = path.split('/').slice(0, -1).reduce((set, path) => {
- if (set.length)
- path = join(set[set.length - 1], path)
- set.push(path || '/')
- return set
- }, [])
- return dirs
- }
-
- // functions currently running
- const running = new Set()
-
- // return the queues for each path the function cares about
- // fn => {paths, dirs}
- const getQueues = fn => {
- const res = reservations.get(fn)
- /* istanbul ignore if - unpossible */
- if (!res)
- throw new Error('function does not have any path reservations')
- return {
- paths: res.paths.map(path => queues.get(path)),
- dirs: [...res.dirs].map(path => queues.get(path)),
- }
- }
-
- // check if fn is first in line for all its paths, and is
- // included in the first set for all its dir queues
- const check = fn => {
- const {paths, dirs} = getQueues(fn)
- return paths.every(q => q[0] === fn) &&
- dirs.every(q => q[0] instanceof Set && q[0].has(fn))
- }
-
- // run the function if it's first in line and not already running
- const run = fn => {
- if (running.has(fn) || !check(fn))
- return false
- running.add(fn)
- fn(() => clear(fn))
- return true
- }
-
- const clear = fn => {
- if (!running.has(fn))
- return false
-
- const { paths, dirs } = reservations.get(fn)
- const next = new Set()
-
- paths.forEach(path => {
- const q = queues.get(path)
- assert.equal(q[0], fn)
- if (q.length === 1)
- queues.delete(path)
- else {
- q.shift()
- if (typeof q[0] === 'function')
- next.add(q[0])
- else
- q[0].forEach(fn => next.add(fn))
- }
- })
-
- dirs.forEach(dir => {
- const q = queues.get(dir)
- assert(q[0] instanceof Set)
- if (q[0].size === 1 && q.length === 1)
- queues.delete(dir)
- else if (q[0].size === 1) {
- q.shift()
-
- // must be a function or else the Set would've been reused
- next.add(q[0])
- } else
- q[0].delete(fn)
- })
- running.delete(fn)
-
- next.forEach(fn => run(fn))
- return true
- }
-
- const reserve = (paths, fn) => {
- // collide on matches across case and unicode normalization
- // On windows, thanks to the magic of 8.3 shortnames, it is fundamentally
- // impossible to determine whether two paths refer to the same thing on
- // disk, without asking the kernel for a shortname.
- // So, we just pretend that every path matches every other path here,
- // effectively removing all parallelization on windows.
- paths = isWindows ? ['win32 parallelization disabled'] : paths.map(p => {
- // don't need normPath, because we skip this entirely for windows
- return normalize(stripSlashes(join(p))).toLowerCase()
- })
-
- const dirs = new Set(
- paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
- )
- reservations.set(fn, {dirs, paths})
- paths.forEach(path => {
- const q = queues.get(path)
- if (!q)
- queues.set(path, [fn])
- else
- q.push(fn)
- })
- dirs.forEach(dir => {
- const q = queues.get(dir)
- if (!q)
- queues.set(dir, [new Set([fn])])
- else if (q[q.length - 1] instanceof Set)
- q[q.length - 1].add(fn)
- else
- q.push(new Set([fn]))
- })
-
- return run(fn)
- }
-
- return { check, reserve }
-}
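A usage sketch of the API described in the header comment:

    const pathReservations = require('./path-reservations.js')
    const reservations = pathReservations()

    // fn runs once it is first in line for every path it touches;
    // calling done() releases the reservation and unblocks any waiters
    reservations.reserve(['a/b/c', 'a/b/d'], done => {
      // ... safely create and write both paths ...
      done()
    })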
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/pax.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/pax.js
deleted file mode 100644
index 7768c7b4..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/pax.js
+++ /dev/null
@@ -1,143 +0,0 @@
-'use strict'
-const Header = require('./header.js')
-const path = require('path')
-
-class Pax {
- constructor (obj, global) {
- this.atime = obj.atime || null
- this.charset = obj.charset || null
- this.comment = obj.comment || null
- this.ctime = obj.ctime || null
- this.gid = obj.gid || null
- this.gname = obj.gname || null
- this.linkpath = obj.linkpath || null
- this.mtime = obj.mtime || null
- this.path = obj.path || null
- this.size = obj.size || null
- this.uid = obj.uid || null
- this.uname = obj.uname || null
- this.dev = obj.dev || null
- this.ino = obj.ino || null
- this.nlink = obj.nlink || null
- this.global = global || false
- }
-
- encode () {
- const body = this.encodeBody()
- if (body === '')
- return null
-
- const bodyLen = Buffer.byteLength(body)
- // round up to 512 bytes
- // add 512 for header
- const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
- const buf = Buffer.allocUnsafe(bufLen)
-
- // 0-fill the header section, it might not hit every field
- for (let i = 0; i < 512; i++)
- buf[i] = 0
-
- new Header({
- // XXX split the path
- // then the path should be PaxHeader + basename, but less than 99,
- // prepend with the dirname
- path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
- mode: this.mode || 0o644,
- uid: this.uid || null,
- gid: this.gid || null,
- size: bodyLen,
- mtime: this.mtime || null,
- type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
- linkpath: '',
- uname: this.uname || '',
- gname: this.gname || '',
- devmaj: 0,
- devmin: 0,
- atime: this.atime || null,
- ctime: this.ctime || null,
- }).encode(buf)
-
- buf.write(body, 512, bodyLen, 'utf8')
-
- // null pad after the body
- for (let i = bodyLen + 512; i < buf.length; i++)
- buf[i] = 0
-
- return buf
- }
-
- encodeBody () {
- return (
- this.encodeField('path') +
- this.encodeField('ctime') +
- this.encodeField('atime') +
- this.encodeField('dev') +
- this.encodeField('ino') +
- this.encodeField('nlink') +
- this.encodeField('charset') +
- this.encodeField('comment') +
- this.encodeField('gid') +
- this.encodeField('gname') +
- this.encodeField('linkpath') +
- this.encodeField('mtime') +
- this.encodeField('size') +
- this.encodeField('uid') +
- this.encodeField('uname')
- )
- }
-
- encodeField (field) {
- if (this[field] === null || this[field] === undefined)
- return ''
- const v = this[field] instanceof Date ? this[field].getTime() / 1000
- : this[field]
- const s = ' ' +
- (field === 'dev' || field === 'ino' || field === 'nlink'
- ? 'SCHILY.' : '') +
- field + '=' + v + '\n'
- const byteLen = Buffer.byteLength(s)
- // the length prefix counts itself: a 9-byte body with a 1-digit
- // prefix would total 10, but '10' takes two digits, so the record
- // is actually 11 bytes ('11' plus the 9-byte body).
- let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
- if (byteLen + digits >= Math.pow(10, digits))
- digits += 1
- const len = digits + byteLen
- return len + s
- }
-}
-
-Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
-
-const merge = (a, b) =>
- b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
-
-const parseKV = string =>
- string
- .replace(/\n$/, '')
- .split('\n')
- .reduce(parseKVLine, Object.create(null))
-
-const parseKVLine = (set, line) => {
- const n = parseInt(line, 10)
-
- // XXX Values with \n in them will fail this.
- // Refactor to not be a naive line-by-line parse.
- if (n !== Buffer.byteLength(line) + 1)
- return set
-
- line = line.substr((n + ' ').length)
- const kv = line.split('=')
- const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
- if (!k)
- return set
-
- const v = kv.join('=')
- set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
- ? new Date(v * 1000)
- : /^[0-9]+$/.test(v) ? +v
- : v
- return set
-}
-
-module.exports = Pax
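A worked example of the record format produced by encodeField, where the
leading length counts itself:

    const Pax = require('./pax.js')

    new Pax({ path: 'foo' }).encodeField('path') // => '12 path=foo\n'
    // ' path=foo\n' is 10 bytes; the 2-digit length brings the total to 12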
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/read-entry.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/read-entry.js
deleted file mode 100644
index 183a6050..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/read-entry.js
+++ /dev/null
@@ -1,100 +0,0 @@
-'use strict'
-const MiniPass = require('minipass')
-const normPath = require('./normalize-windows-path.js')
-
-const SLURP = Symbol('slurp')
-module.exports = class ReadEntry extends MiniPass {
- constructor (header, ex, gex) {
- super()
- // read entries always start life paused. this is to avoid the
- // situation where Minipass's auto-ending of empty streams results
- // in an entry ending before we're ready for it.
- this.pause()
- this.extended = ex
- this.globalExtended = gex
- this.header = header
- this.startBlockSize = 512 * Math.ceil(header.size / 512)
- this.blockRemain = this.startBlockSize
- this.remain = header.size
- this.type = header.type
- this.meta = false
- this.ignore = false
- switch (this.type) {
- case 'File':
- case 'OldFile':
- case 'Link':
- case 'SymbolicLink':
- case 'CharacterDevice':
- case 'BlockDevice':
- case 'Directory':
- case 'FIFO':
- case 'ContiguousFile':
- case 'GNUDumpDir':
- break
-
- case 'NextFileHasLongLinkpath':
- case 'NextFileHasLongPath':
- case 'OldGnuLongPath':
- case 'GlobalExtendedHeader':
- case 'ExtendedHeader':
- case 'OldExtendedHeader':
- this.meta = true
- break
-
- // NOTE: gnutar and bsdtar treat unrecognized types as 'File';
- // it may be worth doing the same, but with a warning.
- default:
- this.ignore = true
- }
-
- this.path = normPath(header.path)
- this.mode = header.mode
- if (this.mode)
- this.mode = this.mode & 0o7777
- this.uid = header.uid
- this.gid = header.gid
- this.uname = header.uname
- this.gname = header.gname
- this.size = header.size
- this.mtime = header.mtime
- this.atime = header.atime
- this.ctime = header.ctime
- this.linkpath = normPath(header.linkpath)
-
- if (ex)
- this[SLURP](ex)
- if (gex)
- this[SLURP](gex, true)
- }
-
- write (data) {
- const writeLen = data.length
- if (writeLen > this.blockRemain)
- throw new Error('writing more to entry than is appropriate')
-
- const r = this.remain
- const br = this.blockRemain
- this.remain = Math.max(0, r - writeLen)
- this.blockRemain = Math.max(0, br - writeLen)
- if (this.ignore)
- return true
-
- if (r >= writeLen)
- return super.write(data)
-
- // r < writeLen
- return super.write(data.slice(0, r))
- }
-
- [SLURP] (ex, global) {
- for (const k in ex) {
- // we slurp in everything except for the path attribute in
- // a global extended header, because that's weird.
- if (ex[k] !== null && ex[k] !== undefined &&
- !(global && k === 'path'))
- this[k] = k === 'path' || k === 'linkpath' ? normPath(ex[k]) : ex[k]
- }
- }
-}
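Since entries start paused (see the constructor comment above), a
consumer must pipe or resume each one; a sketch, assuming `parser` is a
lib/parse.js instance:

    parser.on('entry', entry => {
      if (entry.type === 'File')
        entry.pipe(process.stdout) // piping implicitly resumes the entry
      else
        entry.resume() // discard the body so the parse can advance
    })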
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/replace.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/replace.js
deleted file mode 100644
index 1374f3f2..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/replace.js
+++ /dev/null
@@ -1,223 +0,0 @@
-'use strict'
-
-// tar -r
-const hlo = require('./high-level-opt.js')
-const Pack = require('./pack.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const t = require('./list.js')
-const path = require('path')
-
-// Starting at the head of the file, read a Header.
-// If the checksum is invalid, that's our position to start writing.
-// If it is valid, jump forward by the entry's size (rounded up to 512)
-// and try again.
-// Write the new Pack stream starting there.
-
-const Header = require('./header.js')
-
-module.exports = (opt_, files, cb) => {
- const opt = hlo(opt_)
-
- if (!opt.file)
- throw new TypeError('file is required')
-
- if (opt.gzip)
- throw new TypeError('cannot append to compressed archives')
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- return opt.sync ? replaceSync(opt, files)
- : replace(opt, files, cb)
-}
-
-const replaceSync = (opt, files) => {
- const p = new Pack.Sync(opt)
-
- let threw = true
- let fd
- let position
-
- try {
- try {
- fd = fs.openSync(opt.file, 'r+')
- } catch (er) {
- if (er.code === 'ENOENT')
- fd = fs.openSync(opt.file, 'w+')
- else
- throw er
- }
-
- const st = fs.fstatSync(fd)
- const headBuf = Buffer.alloc(512)
-
- POSITION: for (position = 0; position < st.size; position += 512) {
- for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
- bytes = fs.readSync(
- fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
- )
-
- if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
- throw new Error('cannot append to compressed archives')
-
- if (!bytes)
- break POSITION
- }
-
- const h = new Header(headBuf)
- if (!h.cksumValid)
- break
- const entryBlockSize = 512 * Math.ceil(h.size / 512)
- if (position + entryBlockSize + 512 > st.size)
- break
-      // jump ahead over all the blocks for the body; the 512 for the
-      // header we just parsed is added by the loop increment
- position += entryBlockSize
- if (opt.mtimeCache)
- opt.mtimeCache.set(h.path, h.mtime)
- }
- threw = false
-
- streamSync(opt, p, position, fd, files)
- } finally {
- if (threw) {
- try {
- fs.closeSync(fd)
- } catch (er) {}
- }
- }
-}
-
-const streamSync = (opt, p, position, fd, files) => {
- const stream = new fsm.WriteStreamSync(opt.file, {
- fd: fd,
- start: position,
- })
- p.pipe(stream)
- addFilesSync(p, files)
-}
-
-const replace = (opt, files, cb) => {
- files = Array.from(files)
- const p = new Pack(opt)
-
- const getPos = (fd, size, cb_) => {
- const cb = (er, pos) => {
- if (er)
- fs.close(fd, _ => cb_(er))
- else
- cb_(null, pos)
- }
-
- let position = 0
- if (size === 0)
- return cb(null, 0)
-
- let bufPos = 0
- const headBuf = Buffer.alloc(512)
- const onread = (er, bytes) => {
- if (er)
- return cb(er)
- bufPos += bytes
- if (bufPos < 512 && bytes) {
- return fs.read(
- fd, headBuf, bufPos, headBuf.length - bufPos,
- position + bufPos, onread
- )
- }
-
- if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
- return cb(new Error('cannot append to compressed archives'))
-
- // truncated header
- if (bufPos < 512)
- return cb(null, position)
-
- const h = new Header(headBuf)
- if (!h.cksumValid)
- return cb(null, position)
-
- const entryBlockSize = 512 * Math.ceil(h.size / 512)
- if (position + entryBlockSize + 512 > size)
- return cb(null, position)
-
- position += entryBlockSize + 512
- if (position >= size)
- return cb(null, position)
-
- if (opt.mtimeCache)
- opt.mtimeCache.set(h.path, h.mtime)
- bufPos = 0
- fs.read(fd, headBuf, 0, 512, position, onread)
- }
- fs.read(fd, headBuf, 0, 512, position, onread)
- }
-
- const promise = new Promise((resolve, reject) => {
- p.on('error', reject)
- let flag = 'r+'
- const onopen = (er, fd) => {
- if (er && er.code === 'ENOENT' && flag === 'r+') {
- flag = 'w+'
- return fs.open(opt.file, flag, onopen)
- }
-
- if (er)
- return reject(er)
-
- fs.fstat(fd, (er, st) => {
- if (er)
- return fs.close(fd, () => reject(er))
-
- getPos(fd, st.size, (er, position) => {
- if (er)
- return reject(er)
- const stream = new fsm.WriteStream(opt.file, {
- fd: fd,
- start: position,
- })
- p.pipe(stream)
- stream.on('error', reject)
- stream.on('close', resolve)
- addFilesAsync(p, files)
- })
- })
- }
- fs.open(opt.file, flag, onopen)
- })
-
- return cb ? promise.then(cb, cb) : promise
-}
-
-const addFilesSync = (p, files) => {
- files.forEach(file => {
- if (file.charAt(0) === '@') {
- t({
- file: path.resolve(p.cwd, file.substr(1)),
- sync: true,
- noResume: true,
- onentry: entry => p.add(entry),
- })
- } else
- p.add(file)
- })
- p.end()
-}
-
-const addFilesAsync = (p, files) => {
- while (files.length) {
- const file = files.shift()
- if (file.charAt(0) === '@') {
- return t({
- file: path.resolve(p.cwd, file.substr(1)),
- noResume: true,
- onentry: entry => p.add(entry),
- }).then(_ => addFilesAsync(p, files))
- } else
- p.add(file)
- }
- p.end()
-}
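
Usage sketch for the append logic above (file names hypothetical): `tar.r` scans headers from the start, stops at the first invalid or truncated header, and pipes a new Pack stream into the file at that offset. An entry starting with `@` is itself read as an archive and its entries re-added.

```js
const tar = require('tar')

// synchronous append; throws if archive.tar is gzip-compressed
tar.r({ file: 'archive.tar', sync: true }, ['added.txt'])

// async, with an @-prefixed entry pulling entries from another archive
tar.r({ file: 'archive.tar' }, ['@other.tar']).then(() => console.log('done'))
```
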
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-absolute-path.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-absolute-path.js
deleted file mode 100644
index 1aa2d2ae..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-absolute-path.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// unix absolute paths are also absolute on win32, so we use this for both
-const { isAbsolute, parse } = require('path').win32
-
-// returns [root, stripped]
-// Note that windows will think that //x/y/z/a has a "root" of //x/y, and in
-// those cases, we want to sanitize it to x/y/z/a, not z/a, so we strip /
-// explicitly if it's the first character.
-// drive-specific relative paths on Windows get their root stripped off even
-// though they are not absolute, so `c:../foo` becomes ['c:', '../foo']
-module.exports = path => {
- let r = ''
-
- let parsed = parse(path)
- while (isAbsolute(path) || parsed.root) {
- // windows will think that //x/y/z has a "root" of //x/y/
- // but strip the //?/C:/ off of //?/C:/path
- const root = path.charAt(0) === '/' && path.slice(0, 4) !== '//?/' ? '/'
- : parsed.root
- path = path.substr(root.length)
- r += root
- parsed = parse(path)
- }
- return [r, path]
-}
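
Expected behavior, per the comments above (illustrative values):

```js
const stripAbsolutePath = require('tar/lib/strip-absolute-path.js')
stripAbsolutePath('/foo/bar')   // ['/', 'foo/bar']
stripAbsolutePath('//x/y/z/a')  // ['//', 'x/y/z/a']  (leading slashes, one at a time)
stripAbsolutePath('c:../foo')   // ['c:', '../foo']   (drive-relative root stripped)
stripAbsolutePath('foo/bar')    // ['', 'foo/bar']    (nothing to strip)
```
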
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-trailing-slashes.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-trailing-slashes.js
deleted file mode 100644
index 3e3ecec5..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/strip-trailing-slashes.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// warning: extremely hot code path.
-// This has been meticulously optimized for use
-// within npm install on large package trees.
-// Do not edit without careful benchmarking.
-module.exports = str => {
- let i = str.length - 1
- let slashesStart = -1
- while (i > -1 && str.charAt(i) === '/') {
- slashesStart = i
- i--
- }
- return slashesStart === -1 ? str : str.slice(0, slashesStart)
-}
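
Behavior sketch: a single backwards scan, returning the input unchanged (no allocation) when there is nothing to strip.

```js
const stripTrailingSlashes = require('tar/lib/strip-trailing-slashes.js')
stripTrailingSlashes('foo/bar///') // 'foo/bar'
stripTrailingSlashes('foo/bar')    // 'foo/bar' (same string, returned as-is)
```
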
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/types.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/types.js
deleted file mode 100644
index 7bfc2546..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/types.js
+++ /dev/null
@@ -1,44 +0,0 @@
-'use strict'
-// map types from key to human-friendly name
-exports.name = new Map([
- ['0', 'File'],
- // same as File
- ['', 'OldFile'],
- ['1', 'Link'],
- ['2', 'SymbolicLink'],
- // Devices and FIFOs aren't fully supported
- // they are parsed, but skipped when unpacking
- ['3', 'CharacterDevice'],
- ['4', 'BlockDevice'],
- ['5', 'Directory'],
- ['6', 'FIFO'],
- // same as File
- ['7', 'ContiguousFile'],
- // pax headers
- ['g', 'GlobalExtendedHeader'],
- ['x', 'ExtendedHeader'],
- // vendor-specific stuff
- // skip
- ['A', 'SolarisACL'],
- // like 5, but with data, which should be skipped
- ['D', 'GNUDumpDir'],
- // metadata only, skip
- ['I', 'Inode'],
- // data = link path of next file
- ['K', 'NextFileHasLongLinkpath'],
- // data = path of next file
- ['L', 'NextFileHasLongPath'],
- // skip
- ['M', 'ContinuationFile'],
- // like L
- ['N', 'OldGnuLongPath'],
- // skip
- ['S', 'SparseFile'],
- // skip
- ['V', 'TapeVolumeHeader'],
- // like x
- ['X', 'OldExtendedHeader'],
-])
-
-// map the other direction
-exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
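
The two maps invert each other; a quick sketch:

```js
const types = require('tar/lib/types.js')
types.name.get('x')            // 'ExtendedHeader'
types.name.get('5')            // 'Directory'
types.code.get('SymbolicLink') // '2'
```
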
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/unpack.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/unpack.js
deleted file mode 100644
index 7d39dc0f..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/unpack.js
+++ /dev/null
@@ -1,877 +0,0 @@
-'use strict'
-
-// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
-// but the path reservations are required to avoid race conditions where
-// parallelized unpack ops may mess with one another, due to dependencies
-// (like a Link depending on its target) or destructive operations (like
-// clobbering an fs object to create one of a different type.)
-
-const assert = require('assert')
-const Parser = require('./parse.js')
-const fs = require('fs')
-const fsm = require('fs-minipass')
-const path = require('path')
-const mkdir = require('./mkdir.js')
-const wc = require('./winchars.js')
-const pathReservations = require('./path-reservations.js')
-const stripAbsolutePath = require('./strip-absolute-path.js')
-const normPath = require('./normalize-windows-path.js')
-const stripSlash = require('./strip-trailing-slashes.js')
-const normalize = require('./normalize-unicode.js')
-
-const ONENTRY = Symbol('onEntry')
-const CHECKFS = Symbol('checkFs')
-const CHECKFS2 = Symbol('checkFs2')
-const PRUNECACHE = Symbol('pruneCache')
-const ISREUSABLE = Symbol('isReusable')
-const MAKEFS = Symbol('makeFs')
-const FILE = Symbol('file')
-const DIRECTORY = Symbol('directory')
-const LINK = Symbol('link')
-const SYMLINK = Symbol('symlink')
-const HARDLINK = Symbol('hardlink')
-const UNSUPPORTED = Symbol('unsupported')
-const CHECKPATH = Symbol('checkPath')
-const MKDIR = Symbol('mkdir')
-const ONERROR = Symbol('onError')
-const PENDING = Symbol('pending')
-const PEND = Symbol('pend')
-const UNPEND = Symbol('unpend')
-const ENDED = Symbol('ended')
-const MAYBECLOSE = Symbol('maybeClose')
-const SKIP = Symbol('skip')
-const DOCHOWN = Symbol('doChown')
-const UID = Symbol('uid')
-const GID = Symbol('gid')
-const CHECKED_CWD = Symbol('checkedCwd')
-const crypto = require('crypto')
-const getFlag = require('./get-write-flag.js')
-const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
-const isWindows = platform === 'win32'
-
-// Unlinks on Windows are not atomic.
-//
-// This means that if you have a file entry, followed by another
-// file entry with an identical name, and you cannot re-use the file
-// (because it's a hardlink, or because unlink:true is set, or it's
-// Windows, which does not have useful nlink values), then the unlink
-// will be committed to the disk AFTER the new file has been written
-// over the old one, deleting the new file.
-//
-// To work around this, on Windows systems, we rename the file and then
-// delete the renamed file. It's a sloppy kludge, but frankly, I do not
-// know of a better way to do this, given windows' non-atomic unlink
-// semantics.
-//
-// See: https://github.com/npm/node-tar/issues/183
-/* istanbul ignore next */
-const unlinkFile = (path, cb) => {
- if (!isWindows)
- return fs.unlink(path, cb)
-
- const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
- fs.rename(path, name, er => {
- if (er)
- return cb(er)
- fs.unlink(name, cb)
- })
-}
-
-/* istanbul ignore next */
-const unlinkFileSync = path => {
- if (!isWindows)
- return fs.unlinkSync(path)
-
- const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
- fs.renameSync(path, name)
- fs.unlinkSync(name)
-}
-
-// pick the first value that is a well-formed uint32:
-// e.g. this.uid, entry.uid, this.processUid (likewise for gid)
-const uint32 = (a, b, c) =>
- a === a >>> 0 ? a
- : b === b >>> 0 ? b
- : c
-
-// clear the cache if it's a case-insensitive unicode-squashing match.
-// we can't know if the current file system is case-sensitive or supports
-// unicode fully, so we check for similarity on the maximally compatible
-// representation. Err on the side of pruning, since all it's doing is
-// preventing lstats, and it's not the end of the world if we get a false
-// positive.
-// Note that on windows, we always drop the entire cache whenever a
-// symbolic link is encountered, because 8.3 filenames are impossible
-// to reason about, and collisions are hazards rather than just failures.
-const cacheKeyNormalize = path => normalize(stripSlash(normPath(path)))
- .toLowerCase()
-
-const pruneCache = (cache, abs) => {
- abs = cacheKeyNormalize(abs)
- for (const path of cache.keys()) {
- const pnorm = cacheKeyNormalize(path)
- if (pnorm === abs || pnorm.indexOf(abs + '/') === 0)
- cache.delete(path)
- }
-}
-
-const dropCache = cache => {
- for (const key of cache.keys())
- cache.delete(key)
-}
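
Illustration of the key squashing above (hypothetical paths; `normalize-unicode` applies a compatibility normalization, then the key is lowercased and trailing slashes are stripped), so distinct spellings collapse to one key and pruning errs on the side of dropping entries:

```js
// both hypothetical spellings of "café/x" produce the same cache key,
// the decomposed, lowercased 'cafe\u0301/x':
cacheKeyNormalize('CAFE\u0301/x/') // 'cafe\u0301/x'
cacheKeyNormalize('Caf\u00e9/x')   // 'cafe\u0301/x'
```
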
-
-class Unpack extends Parser {
- constructor (opt) {
- if (!opt)
- opt = {}
-
- opt.ondone = _ => {
- this[ENDED] = true
- this[MAYBECLOSE]()
- }
-
- super(opt)
-
- this[CHECKED_CWD] = false
-
- this.reservations = pathReservations()
-
- this.transform = typeof opt.transform === 'function' ? opt.transform : null
-
- this.writable = true
- this.readable = false
-
- this[PENDING] = 0
- this[ENDED] = false
-
- this.dirCache = opt.dirCache || new Map()
-
- if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
- // need both or neither
- if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
- throw new TypeError('cannot set owner without number uid and gid')
- if (opt.preserveOwner) {
- throw new TypeError(
- 'cannot preserve owner in archive and also set owner explicitly')
- }
- this.uid = opt.uid
- this.gid = opt.gid
- this.setOwner = true
- } else {
- this.uid = null
- this.gid = null
- this.setOwner = false
- }
-
- // default true for root
- if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
- this.preserveOwner = process.getuid && process.getuid() === 0
- else
- this.preserveOwner = !!opt.preserveOwner
-
- this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
- process.getuid() : null
- this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
- process.getgid() : null
-
- // mostly just for testing, but useful in some cases.
- // Forcibly trigger a chown on every entry, no matter what
- this.forceChown = opt.forceChown === true
-
- // turn ><?| in filenames into 0xf000-higher encoded forms
- this.win32 = !!opt.win32 || isWindows
-
- // do not unpack over files that are newer than what's in the archive
- this.newer = !!opt.newer
-
- // do not unpack over ANY files
- this.keep = !!opt.keep
-
- // do not set mtime/atime of extracted entries
- this.noMtime = !!opt.noMtime
-
- // allow .., absolute path entries, and unpacking through symlinks
- // without this, warn and skip .., relativize absolutes, and error
- // on symlinks in extraction path
- this.preservePaths = !!opt.preservePaths
-
- // unlink files and links before writing. This breaks existing hard
- // links, and removes symlink directories rather than erroring
- this.unlink = !!opt.unlink
-
- this.cwd = normPath(path.resolve(opt.cwd || process.cwd()))
- this.strip = +opt.strip || 0
- // if we're not chmodding, then we don't need the process umask
- this.processUmask = opt.noChmod ? 0 : process.umask()
- this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
-
- // default mode for dirs created as parents
- this.dmode = opt.dmode || (0o0777 & (~this.umask))
- this.fmode = opt.fmode || (0o0666 & (~this.umask))
-
- this.on('entry', entry => this[ONENTRY](entry))
- }
-
- // a bad or damaged archive is a warning for Parser, but an error
- // when extracting. Mark those errors as unrecoverable, because
- // the Unpack contract cannot be met.
- warn (code, msg, data = {}) {
- if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT')
- data.recoverable = false
- return super.warn(code, msg, data)
- }
-
- [MAYBECLOSE] () {
- if (this[ENDED] && this[PENDING] === 0) {
- this.emit('prefinish')
- this.emit('finish')
- this.emit('end')
- this.emit('close')
- }
- }
-
- [CHECKPATH] (entry) {
- if (this.strip) {
- const parts = normPath(entry.path).split('/')
- if (parts.length < this.strip)
- return false
- entry.path = parts.slice(this.strip).join('/')
-
- if (entry.type === 'Link') {
- const linkparts = normPath(entry.linkpath).split('/')
- if (linkparts.length >= this.strip)
- entry.linkpath = linkparts.slice(this.strip).join('/')
- else
- return false
- }
- }
-
- if (!this.preservePaths) {
- const p = normPath(entry.path)
- const parts = p.split('/')
- if (parts.includes('..') || isWindows && /^[a-z]:\.\.$/i.test(parts[0])) {
- this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
- entry,
- path: p,
- })
- return false
- }
-
- // strip off the root
- const [root, stripped] = stripAbsolutePath(p)
- if (root) {
- entry.path = stripped
- this.warn('TAR_ENTRY_INFO', `stripping ${root} from absolute path`, {
- entry,
- path: p,
- })
- }
- }
-
- if (path.isAbsolute(entry.path))
- entry.absolute = normPath(path.resolve(entry.path))
- else
- entry.absolute = normPath(path.resolve(this.cwd, entry.path))
-
- // if we somehow ended up with a path that escapes the cwd, and we are
- // not in preservePaths mode, then something is fishy! This should have
- // been prevented above, so ignore this for coverage.
- /* istanbul ignore if - defense in depth */
- if (!this.preservePaths &&
- entry.absolute.indexOf(this.cwd + '/') !== 0 &&
- entry.absolute !== this.cwd) {
- this.warn('TAR_ENTRY_ERROR', 'path escaped extraction target', {
- entry,
- path: normPath(entry.path),
- resolvedPath: entry.absolute,
- cwd: this.cwd,
- })
- return false
- }
-
- // an archive can set properties on the extraction directory, but it
- // may not replace the cwd with a different kind of thing entirely.
- if (entry.absolute === this.cwd &&
- entry.type !== 'Directory' &&
- entry.type !== 'GNUDumpDir')
- return false
-
- // only encode : chars that aren't drive letter indicators
- if (this.win32) {
- const { root: aRoot } = path.win32.parse(entry.absolute)
- entry.absolute = aRoot + wc.encode(entry.absolute.substr(aRoot.length))
- const { root: pRoot } = path.win32.parse(entry.path)
- entry.path = pRoot + wc.encode(entry.path.substr(pRoot.length))
- }
-
- return true
- }
-
- [ONENTRY] (entry) {
- if (!this[CHECKPATH](entry))
- return entry.resume()
-
- assert.equal(typeof entry.absolute, 'string')
-
- switch (entry.type) {
- case 'Directory':
- case 'GNUDumpDir':
- if (entry.mode)
- entry.mode = entry.mode | 0o700
-
- case 'File':
- case 'OldFile':
- case 'ContiguousFile':
- case 'Link':
- case 'SymbolicLink':
- return this[CHECKFS](entry)
-
- case 'CharacterDevice':
- case 'BlockDevice':
- case 'FIFO':
- default:
- return this[UNSUPPORTED](entry)
- }
- }
-
- [ONERROR] (er, entry) {
- // Cwd has to exist, or else nothing works. That's serious.
- // Other errors are warnings, which raise the error in strict
- // mode, but otherwise continue on.
- if (er.name === 'CwdError')
- this.emit('error', er)
- else {
- this.warn('TAR_ENTRY_ERROR', er, {entry})
- this[UNPEND]()
- entry.resume()
- }
- }
-
- [MKDIR] (dir, mode, cb) {
- mkdir(normPath(dir), {
- uid: this.uid,
- gid: this.gid,
- processUid: this.processUid,
- processGid: this.processGid,
- umask: this.processUmask,
- preserve: this.preservePaths,
- unlink: this.unlink,
- cache: this.dirCache,
- cwd: this.cwd,
- mode: mode,
- noChmod: this.noChmod,
- }, cb)
- }
-
- [DOCHOWN] (entry) {
- // in preserve owner mode, chown if the entry doesn't match process
- // in set owner mode, chown if setting doesn't match process
- return this.forceChown ||
- this.preserveOwner &&
- (typeof entry.uid === 'number' && entry.uid !== this.processUid ||
- typeof entry.gid === 'number' && entry.gid !== this.processGid)
- ||
- (typeof this.uid === 'number' && this.uid !== this.processUid ||
- typeof this.gid === 'number' && this.gid !== this.processGid)
- }
-
- [UID] (entry) {
- return uint32(this.uid, entry.uid, this.processUid)
- }
-
- [GID] (entry) {
- return uint32(this.gid, entry.gid, this.processGid)
- }
-
- [FILE] (entry, fullyDone) {
- const mode = entry.mode & 0o7777 || this.fmode
- const stream = new fsm.WriteStream(entry.absolute, {
- flags: getFlag(entry.size),
- mode: mode,
- autoClose: false,
- })
- stream.on('error', er => {
- if (stream.fd)
- fs.close(stream.fd, () => {})
-
- // flush all the data out so that we aren't left hanging
- // if the error wasn't actually fatal. otherwise the parse
- // is blocked, and we never proceed.
- stream.write = () => true
- this[ONERROR](er, entry)
- fullyDone()
- })
-
- let actions = 1
- const done = er => {
- if (er) {
- /* istanbul ignore else - we should always have a fd by now */
- if (stream.fd)
- fs.close(stream.fd, () => {})
-
- this[ONERROR](er, entry)
- fullyDone()
- return
- }
-
- if (--actions === 0) {
- fs.close(stream.fd, er => {
- if (er)
- this[ONERROR](er, entry)
- else
- this[UNPEND]()
- fullyDone()
- })
- }
- }
-
- stream.on('finish', _ => {
- // if futimes fails, try utimes
- // if utimes fails, fail with the original error
- // same for fchown/chown
- const abs = entry.absolute
- const fd = stream.fd
-
- if (entry.mtime && !this.noMtime) {
- actions++
- const atime = entry.atime || new Date()
- const mtime = entry.mtime
- fs.futimes(fd, atime, mtime, er =>
- er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
- : done())
- }
-
- if (this[DOCHOWN](entry)) {
- actions++
- const uid = this[UID](entry)
- const gid = this[GID](entry)
- fs.fchown(fd, uid, gid, er =>
- er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
- : done())
- }
-
- done()
- })
-
- const tx = this.transform ? this.transform(entry) || entry : entry
- if (tx !== entry) {
- tx.on('error', er => {
- this[ONERROR](er, entry)
- fullyDone()
- })
- entry.pipe(tx)
- }
- tx.pipe(stream)
- }
-
- [DIRECTORY] (entry, fullyDone) {
- const mode = entry.mode & 0o7777 || this.dmode
- this[MKDIR](entry.absolute, mode, er => {
- if (er) {
- this[ONERROR](er, entry)
- fullyDone()
- return
- }
-
- let actions = 1
- const done = _ => {
- if (--actions === 0) {
- fullyDone()
- this[UNPEND]()
- entry.resume()
- }
- }
-
- if (entry.mtime && !this.noMtime) {
- actions++
- fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
- }
-
- if (this[DOCHOWN](entry)) {
- actions++
- fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
- }
-
- done()
- })
- }
-
- [UNSUPPORTED] (entry) {
- entry.unsupported = true
- this.warn('TAR_ENTRY_UNSUPPORTED',
- `unsupported entry type: ${entry.type}`, {entry})
- entry.resume()
- }
-
- [SYMLINK] (entry, done) {
- this[LINK](entry, entry.linkpath, 'symlink', done)
- }
-
- [HARDLINK] (entry, done) {
- const linkpath = normPath(path.resolve(this.cwd, entry.linkpath))
- this[LINK](entry, linkpath, 'link', done)
- }
-
- [PEND] () {
- this[PENDING]++
- }
-
- [UNPEND] () {
- this[PENDING]--
- this[MAYBECLOSE]()
- }
-
- [SKIP] (entry) {
- this[UNPEND]()
- entry.resume()
- }
-
- // Check if we can reuse an existing filesystem entry safely and
- // overwrite it, rather than unlinking and recreating
- // Windows doesn't report a useful nlink, so we just never reuse entries
- [ISREUSABLE] (entry, st) {
- return entry.type === 'File' &&
- !this.unlink &&
- st.isFile() &&
- st.nlink <= 1 &&
- !isWindows
- }
-
- // check if a thing is there, and if so, try to clobber it
- [CHECKFS] (entry) {
- this[PEND]()
- const paths = [entry.path]
- if (entry.linkpath)
- paths.push(entry.linkpath)
- this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
- }
-
- [PRUNECACHE] (entry) {
- // if we are not creating a directory, and the path is in the dirCache,
- // then that means we are about to delete the directory we created
- // previously, and it is no longer going to be a directory, and neither
- // is any of its children.
- // If a symbolic link is encountered, all bets are off. There is no
- // reasonable way to sanitize the cache in such a way we will be able to
- // avoid having filesystem collisions. If this happens with a non-symlink
- // entry, it'll just fail to unpack, but a symlink to a directory, using an
- // 8.3 shortname or certain unicode attacks, can evade detection and lead
- // to arbitrary writes to anywhere on the system.
- if (entry.type === 'SymbolicLink')
- dropCache(this.dirCache)
- else if (entry.type !== 'Directory')
- pruneCache(this.dirCache, entry.absolute)
- }
-
- [CHECKFS2] (entry, fullyDone) {
- this[PRUNECACHE](entry)
-
- const done = er => {
- this[PRUNECACHE](entry)
- fullyDone(er)
- }
-
- const checkCwd = () => {
- this[MKDIR](this.cwd, this.dmode, er => {
- if (er) {
- this[ONERROR](er, entry)
- done()
- return
- }
- this[CHECKED_CWD] = true
- start()
- })
- }
-
- const start = () => {
- if (entry.absolute !== this.cwd) {
- const parent = normPath(path.dirname(entry.absolute))
- if (parent !== this.cwd) {
- return this[MKDIR](parent, this.dmode, er => {
- if (er) {
- this[ONERROR](er, entry)
- done()
- return
- }
- afterMakeParent()
- })
- }
- }
- afterMakeParent()
- }
-
- const afterMakeParent = () => {
- fs.lstat(entry.absolute, (lstatEr, st) => {
- if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
- this[SKIP](entry)
- done()
- return
- }
- if (lstatEr || this[ISREUSABLE](entry, st))
- return this[MAKEFS](null, entry, done)
-
- if (st.isDirectory()) {
- if (entry.type === 'Directory') {
- const needChmod = !this.noChmod &&
- entry.mode &&
- (st.mode & 0o7777) !== entry.mode
- const afterChmod = er => this[MAKEFS](er, entry, done)
- if (!needChmod)
- return afterChmod()
- return fs.chmod(entry.absolute, entry.mode, afterChmod)
- }
- // Not a dir entry, have to remove it.
- // NB: the only way to end up with an entry that is the cwd
- // itself, in such a way that == does not detect, is a
- // tricky windows absolute path with UNC or 8.3 parts (and
- // preservePaths:true, or else it will have been stripped).
- // In that case, the user has opted out of path protections
- // explicitly, so if they blow away the cwd, c'est la vie.
- if (entry.absolute !== this.cwd) {
- return fs.rmdir(entry.absolute, er =>
- this[MAKEFS](er, entry, done))
- }
- }
-
- // not a dir, and not reusable
- // don't remove if the cwd, we want that error
- if (entry.absolute === this.cwd)
- return this[MAKEFS](null, entry, done)
-
- unlinkFile(entry.absolute, er =>
- this[MAKEFS](er, entry, done))
- })
- }
-
- if (this[CHECKED_CWD])
- start()
- else
- checkCwd()
- }
-
- [MAKEFS] (er, entry, done) {
- if (er) {
- this[ONERROR](er, entry)
- done()
- return
- }
-
- switch (entry.type) {
- case 'File':
- case 'OldFile':
- case 'ContiguousFile':
- return this[FILE](entry, done)
-
- case 'Link':
- return this[HARDLINK](entry, done)
-
- case 'SymbolicLink':
- return this[SYMLINK](entry, done)
-
- case 'Directory':
- case 'GNUDumpDir':
- return this[DIRECTORY](entry, done)
- }
- }
-
- [LINK] (entry, linkpath, link, done) {
- // XXX: get the type ('symlink' or 'junction') for windows
- fs[link](linkpath, entry.absolute, er => {
- if (er)
- this[ONERROR](er, entry)
- else {
- this[UNPEND]()
- entry.resume()
- }
- done()
- })
- }
-}
-
-const callSync = fn => {
- try {
- return [null, fn()]
- } catch (er) {
- return [er, null]
- }
-}
-class UnpackSync extends Unpack {
- [MAKEFS] (er, entry) {
- return super[MAKEFS](er, entry, () => {})
- }
-
- [CHECKFS] (entry) {
- this[PRUNECACHE](entry)
-
- if (!this[CHECKED_CWD]) {
- const er = this[MKDIR](this.cwd, this.dmode)
- if (er)
- return this[ONERROR](er, entry)
- this[CHECKED_CWD] = true
- }
-
- // don't bother to make the parent if the current entry is the cwd,
- // we've already checked it.
- if (entry.absolute !== this.cwd) {
- const parent = normPath(path.dirname(entry.absolute))
- if (parent !== this.cwd) {
- const mkParent = this[MKDIR](parent, this.dmode)
- if (mkParent)
- return this[ONERROR](mkParent, entry)
- }
- }
-
- const [lstatEr, st] = callSync(() => fs.lstatSync(entry.absolute))
- if (st && (this.keep || this.newer && st.mtime > entry.mtime))
- return this[SKIP](entry)
-
- if (lstatEr || this[ISREUSABLE](entry, st))
- return this[MAKEFS](null, entry)
-
- if (st.isDirectory()) {
- if (entry.type === 'Directory') {
- const needChmod = !this.noChmod &&
- entry.mode &&
- (st.mode & 0o7777) !== entry.mode
- const [er] = needChmod ? callSync(() => {
- fs.chmodSync(entry.absolute, entry.mode)
- }) : []
- return this[MAKEFS](er, entry)
- }
- // not a dir entry, have to remove it
- const [er] = callSync(() => fs.rmdirSync(entry.absolute))
-      return this[MAKEFS](er, entry)
- }
-
- // not a dir, and not reusable.
- // don't remove if it's the cwd, since we want that error.
- const [er] = entry.absolute === this.cwd ? []
- : callSync(() => unlinkFileSync(entry.absolute))
- this[MAKEFS](er, entry)
- }
-
- [FILE] (entry, done) {
- const mode = entry.mode & 0o7777 || this.fmode
-
- const oner = er => {
- let closeError
- try {
- fs.closeSync(fd)
- } catch (e) {
- closeError = e
- }
- if (er || closeError)
- this[ONERROR](er || closeError, entry)
- done()
- }
-
- let fd
- try {
- fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
- } catch (er) {
- return oner(er)
- }
- const tx = this.transform ? this.transform(entry) || entry : entry
- if (tx !== entry) {
- tx.on('error', er => this[ONERROR](er, entry))
- entry.pipe(tx)
- }
-
- tx.on('data', chunk => {
- try {
- fs.writeSync(fd, chunk, 0, chunk.length)
- } catch (er) {
- oner(er)
- }
- })
-
- tx.on('end', _ => {
- let er = null
- // try both, falling futimes back to utimes
- // if either fails, handle the first error
- if (entry.mtime && !this.noMtime) {
- const atime = entry.atime || new Date()
- const mtime = entry.mtime
- try {
- fs.futimesSync(fd, atime, mtime)
- } catch (futimeser) {
- try {
- fs.utimesSync(entry.absolute, atime, mtime)
- } catch (utimeser) {
- er = futimeser
- }
- }
- }
-
- if (this[DOCHOWN](entry)) {
- const uid = this[UID](entry)
- const gid = this[GID](entry)
-
- try {
- fs.fchownSync(fd, uid, gid)
- } catch (fchowner) {
- try {
- fs.chownSync(entry.absolute, uid, gid)
- } catch (chowner) {
- er = er || fchowner
- }
- }
- }
-
- oner(er)
- })
- }
-
- [DIRECTORY] (entry, done) {
- const mode = entry.mode & 0o7777 || this.dmode
- const er = this[MKDIR](entry.absolute, mode)
- if (er) {
- this[ONERROR](er, entry)
- done()
- return
- }
- if (entry.mtime && !this.noMtime) {
- try {
- fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
- } catch (er) {}
- }
- if (this[DOCHOWN](entry)) {
- try {
- fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
- } catch (er) {}
- }
- done()
- entry.resume()
- }
-
- [MKDIR] (dir, mode) {
- try {
- return mkdir.sync(normPath(dir), {
- uid: this.uid,
- gid: this.gid,
- processUid: this.processUid,
- processGid: this.processGid,
- umask: this.processUmask,
- preserve: this.preservePaths,
- unlink: this.unlink,
- cache: this.dirCache,
- cwd: this.cwd,
- mode: mode,
- })
- } catch (er) {
- return er
- }
- }
-
- [LINK] (entry, linkpath, link, done) {
- try {
- fs[link + 'Sync'](linkpath, entry.absolute)
- done()
- entry.resume()
- } catch (er) {
- return this[ONERROR](er, entry)
- }
- }
-}
-
-Unpack.Sync = UnpackSync
-module.exports = Unpack
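
Usage sketch through the high-level API (`tar.x` wraps the Unpack class above; file names hypothetical):

```js
const tar = require('tar')
tar.x({
  file: 'archive.tar',
  cwd: 'dest',           // resolved and normalized, as this.cwd above
  strip: 1,              // drop the first path component, per [CHECKPATH]
  preservePaths: false,  // keep the '..' / absolute-path protections on
})
```
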
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/update.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/update.js
deleted file mode 100644
index a5784b73..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/update.js
+++ /dev/null
@@ -1,36 +0,0 @@
-'use strict'
-
-// tar -u
-
-const hlo = require('./high-level-opt.js')
-const r = require('./replace.js')
-// just call tar.r with the filter and mtimeCache
-
-module.exports = (opt_, files, cb) => {
- const opt = hlo(opt_)
-
- if (!opt.file)
- throw new TypeError('file is required')
-
- if (opt.gzip)
- throw new TypeError('cannot append to compressed archives')
-
- if (!files || !Array.isArray(files) || !files.length)
- throw new TypeError('no files or directories specified')
-
- files = Array.from(files)
-
- mtimeFilter(opt)
- return r(opt, files, cb)
-}
-
-const mtimeFilter = opt => {
- const filter = opt.filter
-
- if (!opt.mtimeCache)
- opt.mtimeCache = new Map()
-
- opt.filter = filter ? (path, stat) =>
- filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
- : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
-}
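
Usage sketch: `tar.u` is `tar.r` plus the mtime filter above, so files are only re-added when they are newer than what the archive already holds (file names hypothetical):

```js
const tar = require('tar')
tar.u({ file: 'archive.tar' }, ['notes.txt'])
  .then(() => console.log('updated'))
```
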
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/warn-mixin.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/warn-mixin.js
deleted file mode 100644
index aeebb531..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/warn-mixin.js
+++ /dev/null
@@ -1,21 +0,0 @@
-'use strict'
-module.exports = Base => class extends Base {
- warn (code, message, data = {}) {
- if (this.file)
- data.file = this.file
- if (this.cwd)
- data.cwd = this.cwd
- data.code = message instanceof Error && message.code || code
- data.tarCode = code
- if (!this.strict && data.recoverable !== false) {
- if (message instanceof Error) {
- data = Object.assign(message, data)
- message = message.message
- }
- this.emit('warn', data.tarCode, message, data)
- } else if (message instanceof Error)
- this.emit('error', Object.assign(message, data))
- else
- this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
- }
-}
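
Behavior sketch: in non-strict mode a recoverable problem surfaces as a `'warn'` event with the signature shown above; in strict mode (or when `data.recoverable === false`) the same data is attached to an Error and emitted as `'error'`. Feeding the parser garbage likely triggers a TAR_BAD_ARCHIVE warning:

```js
const { Parse } = require('tar')
const p = new Parse()
p.on('warn', (code, message, data) => console.error(code, message, data.tarCode))
p.end(Buffer.alloc(512, 'x')) // not a valid header
```
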
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/winchars.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/winchars.js
deleted file mode 100644
index ebcab4ae..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/winchars.js
+++ /dev/null
@@ -1,23 +0,0 @@
-'use strict'
-
-// When writing files on Windows, translate the characters to their
-// 0xf000 higher-encoded versions.
-
-const raw = [
- '|',
- '<',
- '>',
- '?',
- ':',
-]
-
-const win = raw.map(char =>
- String.fromCharCode(0xf000 + char.charCodeAt(0)))
-
-const toWin = new Map(raw.map((char, i) => [char, win[i]]))
-const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
-
-module.exports = {
- encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
- decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s),
-}
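
Round-trip sketch: each reserved character maps to its 0xf000-offset private-use codepoint and back.

```js
const wc = require('tar/lib/winchars.js')
wc.encode('what?.txt')             // 'what\uf03f.txt' (0xf000 + '?'.charCodeAt(0))
wc.decode(wc.encode('a<b>c.txt'))  // 'a<b>c.txt'
```
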
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/lib/write-entry.js b/sandbox/testAppNevena/Front/node_modules/tar/lib/write-entry.js
deleted file mode 100644
index 3702f2ae..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/lib/write-entry.js
+++ /dev/null
@@ -1,525 +0,0 @@
-'use strict'
-const MiniPass = require('minipass')
-const Pax = require('./pax.js')
-const Header = require('./header.js')
-const fs = require('fs')
-const path = require('path')
-const normPath = require('./normalize-windows-path.js')
-const stripSlash = require('./strip-trailing-slashes.js')
-
-const prefixPath = (path, prefix) => {
- if (!prefix)
- return normPath(path)
- path = normPath(path).replace(/^\.(\/|$)/, '')
- return stripSlash(prefix) + '/' + path
-}
-
-const maxReadSize = 16 * 1024 * 1024
-const PROCESS = Symbol('process')
-const FILE = Symbol('file')
-const DIRECTORY = Symbol('directory')
-const SYMLINK = Symbol('symlink')
-const HARDLINK = Symbol('hardlink')
-const HEADER = Symbol('header')
-const READ = Symbol('read')
-const LSTAT = Symbol('lstat')
-const ONLSTAT = Symbol('onlstat')
-const ONREAD = Symbol('onread')
-const ONREADLINK = Symbol('onreadlink')
-const OPENFILE = Symbol('openfile')
-const ONOPENFILE = Symbol('onopenfile')
-const CLOSE = Symbol('close')
-const MODE = Symbol('mode')
-const AWAITDRAIN = Symbol('awaitDrain')
-const ONDRAIN = Symbol('ondrain')
-const PREFIX = Symbol('prefix')
-const HAD_ERROR = Symbol('hadError')
-const warner = require('./warn-mixin.js')
-const winchars = require('./winchars.js')
-const stripAbsolutePath = require('./strip-absolute-path.js')
-
-const modeFix = require('./mode-fix.js')
-
-const WriteEntry = warner(class WriteEntry extends MiniPass {
- constructor (p, opt) {
- opt = opt || {}
- super(opt)
- if (typeof p !== 'string')
- throw new TypeError('path is required')
- this.path = normPath(p)
- // suppress atime, ctime, uid, gid, uname, gname
- this.portable = !!opt.portable
- // until node has builtin pwnam functions, this'll have to do
- this.myuid = process.getuid && process.getuid() || 0
- this.myuser = process.env.USER || ''
- this.maxReadSize = opt.maxReadSize || maxReadSize
- this.linkCache = opt.linkCache || new Map()
- this.statCache = opt.statCache || new Map()
- this.preservePaths = !!opt.preservePaths
- this.cwd = normPath(opt.cwd || process.cwd())
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.noMtime = !!opt.noMtime
- this.mtime = opt.mtime || null
- this.prefix = opt.prefix ? normPath(opt.prefix) : null
-
- this.fd = null
- this.blockLen = null
- this.blockRemain = null
- this.buf = null
- this.offset = null
- this.length = null
- this.pos = null
- this.remain = null
-
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- let pathWarn = false
- if (!this.preservePaths) {
- const [root, stripped] = stripAbsolutePath(this.path)
- if (root) {
- this.path = stripped
- pathWarn = root
- }
- }
-
- this.win32 = !!opt.win32 || process.platform === 'win32'
- if (this.win32) {
- // force the \ to / normalization, since we might not *actually*
- // be on windows, but want \ to be considered a path separator.
- this.path = winchars.decode(this.path.replace(/\\/g, '/'))
- p = p.replace(/\\/g, '/')
- }
-
- this.absolute = normPath(opt.absolute || path.resolve(this.cwd, p))
-
- if (this.path === '')
- this.path = './'
-
- if (pathWarn) {
- this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
- entry: this,
- path: pathWarn + this.path,
- })
- }
-
- if (this.statCache.has(this.absolute))
- this[ONLSTAT](this.statCache.get(this.absolute))
- else
- this[LSTAT]()
- }
-
- emit (ev, ...data) {
- if (ev === 'error')
- this[HAD_ERROR] = true
- return super.emit(ev, ...data)
- }
-
- [LSTAT] () {
- fs.lstat(this.absolute, (er, stat) => {
- if (er)
- return this.emit('error', er)
- this[ONLSTAT](stat)
- })
- }
-
- [ONLSTAT] (stat) {
- this.statCache.set(this.absolute, stat)
- this.stat = stat
- if (!stat.isFile())
- stat.size = 0
- this.type = getType(stat)
- this.emit('stat', stat)
- this[PROCESS]()
- }
-
- [PROCESS] () {
- switch (this.type) {
- case 'File': return this[FILE]()
- case 'Directory': return this[DIRECTORY]()
- case 'SymbolicLink': return this[SYMLINK]()
- // unsupported types are ignored.
- default: return this.end()
- }
- }
-
- [MODE] (mode) {
- return modeFix(mode, this.type === 'Directory', this.portable)
- }
-
- [PREFIX] (path) {
- return prefixPath(path, this.prefix)
- }
-
- [HEADER] () {
- if (this.type === 'Directory' && this.portable)
- this.noMtime = true
-
- this.header = new Header({
- path: this[PREFIX](this.path),
- // only apply the prefix to hard links.
- linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
- : this.linkpath,
- // only the permissions and setuid/setgid/sticky bitflags
- // not the higher-order bits that specify file type
- mode: this[MODE](this.stat.mode),
- uid: this.portable ? null : this.stat.uid,
- gid: this.portable ? null : this.stat.gid,
- size: this.stat.size,
- mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
- type: this.type,
- uname: this.portable ? null :
- this.stat.uid === this.myuid ? this.myuser : '',
- atime: this.portable ? null : this.stat.atime,
- ctime: this.portable ? null : this.stat.ctime,
- })
-
- if (this.header.encode() && !this.noPax) {
- super.write(new Pax({
- atime: this.portable ? null : this.header.atime,
- ctime: this.portable ? null : this.header.ctime,
- gid: this.portable ? null : this.header.gid,
- mtime: this.noMtime ? null : this.mtime || this.header.mtime,
- path: this[PREFIX](this.path),
- linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
- : this.linkpath,
- size: this.header.size,
- uid: this.portable ? null : this.header.uid,
- uname: this.portable ? null : this.header.uname,
- dev: this.portable ? null : this.stat.dev,
- ino: this.portable ? null : this.stat.ino,
- nlink: this.portable ? null : this.stat.nlink,
- }).encode())
- }
- super.write(this.header.block)
- }
-
- [DIRECTORY] () {
- if (this.path.substr(-1) !== '/')
- this.path += '/'
- this.stat.size = 0
- this[HEADER]()
- this.end()
- }
-
- [SYMLINK] () {
- fs.readlink(this.absolute, (er, linkpath) => {
- if (er)
- return this.emit('error', er)
- this[ONREADLINK](linkpath)
- })
- }
-
- [ONREADLINK] (linkpath) {
- this.linkpath = normPath(linkpath)
- this[HEADER]()
- this.end()
- }
-
- [HARDLINK] (linkpath) {
- this.type = 'Link'
- this.linkpath = normPath(path.relative(this.cwd, linkpath))
- this.stat.size = 0
- this[HEADER]()
- this.end()
- }
-
- [FILE] () {
- if (this.stat.nlink > 1) {
- const linkKey = this.stat.dev + ':' + this.stat.ino
- if (this.linkCache.has(linkKey)) {
- const linkpath = this.linkCache.get(linkKey)
- if (linkpath.indexOf(this.cwd) === 0)
- return this[HARDLINK](linkpath)
- }
- this.linkCache.set(linkKey, this.absolute)
- }
-
- this[HEADER]()
- if (this.stat.size === 0)
- return this.end()
-
- this[OPENFILE]()
- }
-
- [OPENFILE] () {
- fs.open(this.absolute, 'r', (er, fd) => {
- if (er)
- return this.emit('error', er)
- this[ONOPENFILE](fd)
- })
- }
-
- [ONOPENFILE] (fd) {
- this.fd = fd
- if (this[HAD_ERROR])
- return this[CLOSE]()
-
- this.blockLen = 512 * Math.ceil(this.stat.size / 512)
- this.blockRemain = this.blockLen
- const bufLen = Math.min(this.blockLen, this.maxReadSize)
- this.buf = Buffer.allocUnsafe(bufLen)
- this.offset = 0
- this.pos = 0
- this.remain = this.stat.size
- this.length = this.buf.length
- this[READ]()
- }
-
- [READ] () {
- const { fd, buf, offset, length, pos } = this
- fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
- if (er) {
- // ignoring the error from close(2) is a bad practice, but at
- // this point we already have an error, don't need another one
- return this[CLOSE](() => this.emit('error', er))
- }
- this[ONREAD](bytesRead)
- })
- }
-
- [CLOSE] (cb) {
- fs.close(this.fd, cb)
- }
-
- [ONREAD] (bytesRead) {
- if (bytesRead <= 0 && this.remain > 0) {
- const er = new Error('encountered unexpected EOF')
- er.path = this.absolute
- er.syscall = 'read'
- er.code = 'EOF'
- return this[CLOSE](() => this.emit('error', er))
- }
-
- if (bytesRead > this.remain) {
- const er = new Error('did not encounter expected EOF')
- er.path = this.absolute
- er.syscall = 'read'
- er.code = 'EOF'
- return this[CLOSE](() => this.emit('error', er))
- }
-
-    // null out the rest of the buffer, if we could fit the block padding.
-    // At the end of this loop, bytesRead and this.remain have been
-    // incremented up to the blockRemain level, as if we had expected to
-    // get a null-padded file and read it until the end. Then we will
-    // decrement both remain and blockRemain by bytesRead, and know that
-    // we reached the expected EOF, without any null buffer to append.
- if (bytesRead === this.remain) {
- for (let i = bytesRead; i < this.length && bytesRead < this.blockRemain; i++) {
- this.buf[i + this.offset] = 0
- bytesRead++
- this.remain++
- }
- }
-
- const writeBuf = this.offset === 0 && bytesRead === this.buf.length ?
- this.buf : this.buf.slice(this.offset, this.offset + bytesRead)
-
- const flushed = this.write(writeBuf)
- if (!flushed)
- this[AWAITDRAIN](() => this[ONDRAIN]())
- else
- this[ONDRAIN]()
- }
-
- [AWAITDRAIN] (cb) {
- this.once('drain', cb)
- }
-
- write (writeBuf) {
- if (this.blockRemain < writeBuf.length) {
- const er = new Error('writing more data than expected')
- er.path = this.absolute
- return this.emit('error', er)
- }
- this.remain -= writeBuf.length
- this.blockRemain -= writeBuf.length
- this.pos += writeBuf.length
- this.offset += writeBuf.length
- return super.write(writeBuf)
- }
-
- [ONDRAIN] () {
- if (!this.remain) {
- if (this.blockRemain)
- super.write(Buffer.alloc(this.blockRemain))
- return this[CLOSE](er => er ? this.emit('error', er) : this.end())
- }
-
- if (this.offset >= this.length) {
- // if we only have a smaller bit left to read, alloc a smaller buffer
- // otherwise, keep it the same length it was before.
- this.buf = Buffer.allocUnsafe(Math.min(this.blockRemain, this.buf.length))
- this.offset = 0
- }
- this.length = this.buf.length - this.offset
- this[READ]()
- }
-})
-
-class WriteEntrySync extends WriteEntry {
- [LSTAT] () {
- this[ONLSTAT](fs.lstatSync(this.absolute))
- }
-
- [SYMLINK] () {
- this[ONREADLINK](fs.readlinkSync(this.absolute))
- }
-
- [OPENFILE] () {
- this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
- }
-
- [READ] () {
- let threw = true
- try {
- const { fd, buf, offset, length, pos } = this
- const bytesRead = fs.readSync(fd, buf, offset, length, pos)
- this[ONREAD](bytesRead)
- threw = false
- } finally {
- // ignoring the error from close(2) is a bad practice, but at
- // this point we already have an error, don't need another one
- if (threw) {
- try {
- this[CLOSE](() => {})
- } catch (er) {}
- }
- }
- }
-
- [AWAITDRAIN] (cb) {
- cb()
- }
-
- [CLOSE] (cb) {
- fs.closeSync(this.fd)
- cb()
- }
-}
-
-const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
- constructor (readEntry, opt) {
- opt = opt || {}
- super(opt)
- this.preservePaths = !!opt.preservePaths
- this.portable = !!opt.portable
- this.strict = !!opt.strict
- this.noPax = !!opt.noPax
- this.noMtime = !!opt.noMtime
-
- this.readEntry = readEntry
- this.type = readEntry.type
- if (this.type === 'Directory' && this.portable)
- this.noMtime = true
-
- this.prefix = opt.prefix || null
-
- this.path = normPath(readEntry.path)
- this.mode = this[MODE](readEntry.mode)
- this.uid = this.portable ? null : readEntry.uid
- this.gid = this.portable ? null : readEntry.gid
- this.uname = this.portable ? null : readEntry.uname
- this.gname = this.portable ? null : readEntry.gname
- this.size = readEntry.size
- this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
- this.atime = this.portable ? null : readEntry.atime
- this.ctime = this.portable ? null : readEntry.ctime
- this.linkpath = normPath(readEntry.linkpath)
-
- if (typeof opt.onwarn === 'function')
- this.on('warn', opt.onwarn)
-
- let pathWarn = false
- if (!this.preservePaths) {
- const [root, stripped] = stripAbsolutePath(this.path)
- if (root) {
- this.path = stripped
- pathWarn = root
- }
- }
-
- this.remain = readEntry.size
- this.blockRemain = readEntry.startBlockSize
-
- this.header = new Header({
- path: this[PREFIX](this.path),
- linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
- : this.linkpath,
- // only the permissions and setuid/setgid/sticky bitflags
- // not the higher-order bits that specify file type
- mode: this.mode,
- uid: this.portable ? null : this.uid,
- gid: this.portable ? null : this.gid,
- size: this.size,
- mtime: this.noMtime ? null : this.mtime,
- type: this.type,
- uname: this.portable ? null : this.uname,
- atime: this.portable ? null : this.atime,
- ctime: this.portable ? null : this.ctime,
- })
-
- if (pathWarn) {
- this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
- entry: this,
- path: pathWarn + this.path,
- })
- }
-
- if (this.header.encode() && !this.noPax) {
- super.write(new Pax({
- atime: this.portable ? null : this.atime,
- ctime: this.portable ? null : this.ctime,
- gid: this.portable ? null : this.gid,
- mtime: this.noMtime ? null : this.mtime,
- path: this[PREFIX](this.path),
- linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
- : this.linkpath,
- size: this.size,
- uid: this.portable ? null : this.uid,
- uname: this.portable ? null : this.uname,
- dev: this.portable ? null : this.readEntry.dev,
- ino: this.portable ? null : this.readEntry.ino,
- nlink: this.portable ? null : this.readEntry.nlink,
- }).encode())
- }
-
- super.write(this.header.block)
- readEntry.pipe(this)
- }
-
- [PREFIX] (path) {
- return prefixPath(path, this.prefix)
- }
-
- [MODE] (mode) {
- return modeFix(mode, this.type === 'Directory', this.portable)
- }
-
- write (data) {
- const writeLen = data.length
- if (writeLen > this.blockRemain)
- throw new Error('writing more to entry than is appropriate')
- this.blockRemain -= writeLen
- return super.write(data)
- }
-
- end () {
- if (this.blockRemain)
- super.write(Buffer.alloc(this.blockRemain))
- return super.end()
- }
-})
-
-WriteEntry.Sync = WriteEntrySync
-WriteEntry.Tar = WriteEntryTar
-
-const getType = stat =>
- stat.isFile() ? 'File'
- : stat.isDirectory() ? 'Directory'
- : stat.isSymbolicLink() ? 'SymbolicLink'
- : 'Unsupported'
-
-module.exports = WriteEntry
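
WriteEntry is normally driven by Pack, but a minimal direct-use sketch (assuming `some-file.txt` exists) shows the framing: a 512-byte header block, the body, then zero padding to the next 512-byte boundary.

```js
const WriteEntry = require('tar/lib/write-entry.js')
const we = new WriteEntry('some-file.txt')
let bytes = 0
we.on('data', chunk => bytes += chunk.length)
we.on('end', () => console.log(bytes, bytes % 512 === 0)) // always a block multiple
```
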
diff --git a/sandbox/testAppNevena/Front/node_modules/tar/package.json b/sandbox/testAppNevena/Front/node_modules/tar/package.json
deleted file mode 100644
index 9f9977a0..00000000
--- a/sandbox/testAppNevena/Front/node_modules/tar/package.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
- "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
- "name": "tar",
- "description": "tar for node",
- "version": "6.1.11",
- "repository": {
- "type": "git",
- "url": "https://github.com/npm/node-tar.git"
- },
- "scripts": {
- "test:posix": "tap",
- "test:win32": "tap --lines=98 --branches=98 --statements=98 --functions=98",
- "test": "node test/fixtures/test.js",
- "posttest": "npm run lint",
- "eslint": "eslint",
- "lint": "npm run eslint -- test lib",
- "lintfix": "npm run lint -- --fix",
- "preversion": "npm test",
- "postversion": "npm publish",
- "prepublishOnly": "git push origin --follow-tags",
- "genparse": "node scripts/generate-parse-fixtures.js",
- "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done"
- },
- "dependencies": {
- "chownr": "^2.0.0",
- "fs-minipass": "^2.0.0",
- "minipass": "^3.0.0",
- "minizlib": "^2.1.1",
- "mkdirp": "^1.0.3",
- "yallist": "^4.0.0"
- },
- "devDependencies": {
- "chmodr": "^1.2.0",
- "end-of-stream": "^1.4.3",
- "eslint": "^7.17.0",
- "eslint-plugin-import": "^2.22.1",
- "eslint-plugin-node": "^11.1.0",
- "eslint-plugin-promise": "^4.2.1",
- "eslint-plugin-standard": "^5.0.0",
- "events-to-array": "^1.1.2",
- "mutate-fs": "^2.1.1",
- "rimraf": "^2.7.1",
- "tap": "^15.0.9",
- "tar-fs": "^1.16.3",
- "tar-stream": "^1.6.2"
- },
- "license": "ISC",
- "engines": {
- "node": ">= 10"
- },
- "files": [
- "index.js",
- "lib/*.js"
- ],
- "tap": {
- "coverage-map": "map.js",
- "check-coverage": true
- }
-}