1st version
Jira_helper/node_modules/.package-lock.json (generated, vendored, new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
{
|
||||
"name": "Jira_helper",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"node_modules/ansi-regex": {
|
||||
"version": "6.2.2",
|
||||
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
|
||||
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/ansi-styles": {
|
||||
"version": "6.2.3",
|
||||
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
|
||||
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/cliui": {
|
||||
"version": "9.0.1",
|
||||
"resolved": "https://registry.npmjs.org/cliui/-/cliui-9.0.1.tgz",
|
||||
"integrity": "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"string-width": "^7.2.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"wrap-ansi": "^9.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
}
|
||||
},
|
||||
"node_modules/csv-parse": {
|
||||
"version": "6.1.0",
|
||||
"resolved": "https://registry.npmjs.org/csv-parse/-/csv-parse-6.1.0.tgz",
|
||||
"integrity": "sha512-CEE+jwpgLn+MmtCpVcPtiCZpVtB6Z2OKPTr34pycYYoL7sxdOkXDdQ4lRiw6ioC0q6BLqhc6cKweCVvral8yhw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/emoji-regex": {
|
||||
"version": "10.5.0",
|
||||
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.5.0.tgz",
|
||||
"integrity": "sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/escalade": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
|
||||
"integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/get-caller-file": {
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
|
||||
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": "6.* || 8.* || >= 10.*"
|
||||
}
|
||||
},
|
||||
"node_modules/get-east-asian-width": {
|
||||
"version": "1.4.0",
|
||||
"resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz",
|
||||
"integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/string-width": {
|
||||
"version": "7.2.0",
|
||||
"resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
|
||||
"integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"emoji-regex": "^10.3.0",
|
||||
"get-east-asian-width": "^1.0.0",
|
||||
"strip-ansi": "^7.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/strip-ansi": {
|
||||
"version": "7.1.2",
|
||||
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
|
||||
"integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"ansi-regex": "^6.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/wrap-ansi": {
|
||||
"version": "9.0.2",
|
||||
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
|
||||
"integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"ansi-styles": "^6.2.1",
|
||||
"string-width": "^7.0.0",
|
||||
"strip-ansi": "^7.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/y18n": {
|
||||
"version": "5.0.8",
|
||||
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
|
||||
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/yargs": {
|
||||
"version": "18.0.0",
|
||||
"resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz",
|
||||
"integrity": "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"cliui": "^9.0.1",
|
||||
"escalade": "^3.1.1",
|
||||
"get-caller-file": "^2.0.5",
|
||||
"string-width": "^7.2.0",
|
||||
"y18n": "^5.0.5",
|
||||
"yargs-parser": "^22.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^20.19.0 || ^22.12.0 || >=23"
|
||||
}
|
||||
},
|
||||
"node_modules/yargs-parser": {
|
||||
"version": "22.0.0",
|
||||
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-22.0.0.tgz",
|
||||
"integrity": "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": "^20.19.0 || ^22.12.0 || >=23"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
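This hidden lockfile records the vendored dependency tree under `Jira_helper/node_modules` (yargs, csv-parse and the packages they pull in). A minimal sketch for listing what it pins; the relative path assumes the script is run from the repository root:

```js
// Sketch: enumerate the packages recorded in the hidden lockfile above.
import {readFileSync} from 'node:fs';

const lock = JSON.parse(readFileSync('Jira_helper/node_modules/.package-lock.json', 'utf8'));

for (const [location, meta] of Object.entries(lock.packages)) {
  console.log(`${location.replace('node_modules/', '')}@${meta.version} (${meta.license})`);
}
```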
Jira_helper/node_modules/ansi-regex/index.d.ts (generated, vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
export type Options = {
|
||||
/**
|
||||
Match only the first ANSI escape.
|
||||
|
||||
@default false
|
||||
*/
|
||||
readonly onlyFirst: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
Regular expression for matching ANSI escape codes.
|
||||
|
||||
@example
|
||||
```
|
||||
import ansiRegex from 'ansi-regex';
|
||||
|
||||
ansiRegex().test('\u001B[4mcake\u001B[0m');
|
||||
//=> true
|
||||
|
||||
ansiRegex().test('cake');
|
||||
//=> false
|
||||
|
||||
'\u001B[4mcake\u001B[0m'.match(ansiRegex());
|
||||
//=> ['\u001B[4m', '\u001B[0m']
|
||||
|
||||
'\u001B[4mcake\u001B[0m'.match(ansiRegex({onlyFirst: true}));
|
||||
//=> ['\u001B[4m']
|
||||
|
||||
'\u001B]8;;https://github.com\u0007click\u001B]8;;\u0007'.match(ansiRegex());
|
||||
//=> ['\u001B]8;;https://github.com\u0007', '\u001B]8;;\u0007']
|
||||
```
|
||||
*/
|
||||
export default function ansiRegex(options?: Options): RegExp;
|
||||
Jira_helper/node_modules/ansi-regex/index.js (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
export default function ansiRegex({onlyFirst = false} = {}) {
	// Valid string terminator sequences are BEL, ESC\, and 0x9c
	const ST = '(?:\\u0007|\\u001B\\u005C|\\u009C)';

	// OSC sequences only: ESC ] ... ST (non-greedy until the first ST)
	const osc = `(?:\\u001B\\][\\s\\S]*?${ST})`;

	// CSI and related: ESC/C1, optional intermediates, optional params (supports ; and :) then final byte
	const csi = '[\\u001B\\u009B][[\\]()#;?]*(?:\\d{1,4}(?:[;:]\\d{0,4})*)?[\\dA-PR-TZcf-nq-uy=><~]';

	const pattern = `${osc}|${csi}`;

	return new RegExp(pattern, onlyFirst ? undefined : 'g');
}
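The `onlyFirst` option above only changes the flags handed to `new RegExp`: without it the regex is global and matches every escape, with it the `g` flag is dropped so matching stops at the first escape. A small usage sketch of the module as published:

```js
import ansiRegex from 'ansi-regex';

const styled = '\u001B[1mbold\u001B[22m and \u001B[31mred\u001B[39m';

console.log(styled.match(ansiRegex()).length);              //=> 4 (every escape)
console.log(styled.match(ansiRegex({onlyFirst: true}))[0]); //=> '\u001B[1m' (first escape only)
```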
Jira_helper/node_modules/ansi-regex/license (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
MIT License

Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Jira_helper/node_modules/ansi-regex/package.json (generated, vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"name": "ansi-regex",
|
||||
"version": "6.2.2",
|
||||
"description": "Regular expression for matching ANSI escape codes",
|
||||
"license": "MIT",
|
||||
"repository": "chalk/ansi-regex",
|
||||
"funding": "https://github.com/chalk/ansi-regex?sponsor=1",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": "./index.js",
|
||||
"types": "./index.d.ts",
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && ava && tsd",
|
||||
"view-supported": "node fixtures/view-codes.js"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"keywords": [
|
||||
"ansi",
|
||||
"styles",
|
||||
"color",
|
||||
"colour",
|
||||
"colors",
|
||||
"terminal",
|
||||
"console",
|
||||
"cli",
|
||||
"string",
|
||||
"tty",
|
||||
"escape",
|
||||
"formatting",
|
||||
"rgb",
|
||||
"256",
|
||||
"shell",
|
||||
"xterm",
|
||||
"command-line",
|
||||
"text",
|
||||
"regex",
|
||||
"regexp",
|
||||
"re",
|
||||
"match",
|
||||
"test",
|
||||
"find",
|
||||
"pattern"
|
||||
],
|
||||
"devDependencies": {
|
||||
"ansi-escapes": "^5.0.0",
|
||||
"ava": "^3.15.0",
|
||||
"tsd": "^0.21.0",
|
||||
"xo": "^0.54.2"
|
||||
}
|
||||
}
|
||||
Jira_helper/node_modules/ansi-regex/readme.md (generated, vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
# ansi-regex
|
||||
|
||||
> Regular expression for matching [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code)
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install ansi-regex
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import ansiRegex from 'ansi-regex';
|
||||
|
||||
ansiRegex().test('\u001B[4mcake\u001B[0m');
|
||||
//=> true
|
||||
|
||||
ansiRegex().test('cake');
|
||||
//=> false
|
||||
|
||||
'\u001B[4mcake\u001B[0m'.match(ansiRegex());
|
||||
//=> ['\u001B[4m', '\u001B[0m']
|
||||
|
||||
'\u001B[4mcake\u001B[0m'.match(ansiRegex({onlyFirst: true}));
|
||||
//=> ['\u001B[4m']
|
||||
|
||||
'\u001B]8;;https://github.com\u0007click\u001B]8;;\u0007'.match(ansiRegex());
|
||||
//=> ['\u001B]8;;https://github.com\u0007', '\u001B]8;;\u0007']
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
### ansiRegex(options?)
|
||||
|
||||
Returns a regex for matching ANSI escape codes.
|
||||
|
||||
#### options
|
||||
|
||||
Type: `object`
|
||||
|
||||
##### onlyFirst
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `false` *(Matches any ANSI escape codes in a string)*
|
||||
|
||||
Match only the first ANSI escape.
|
||||
|
||||
## Important
|
||||
|
||||
If you run the regex against untrusted user input in a server context, you should [give it a timeout](https://github.com/sindresorhus/super-regex).
|
||||
|
||||
**I do not consider [ReDoS](https://blog.yossarian.net/2022/12/28/ReDoS-vulnerabilities-and-misaligned-incentives) a valid vulnerability for this package.**
|
||||
|
||||
## FAQ
|
||||
|
||||
### Why do you test for codes not in the ECMA 48 standard?
|
||||
|
||||
Some of the codes we run as a test are codes that we acquired finding various lists of non-standard or manufacturer specific codes. We test for both standard and non-standard codes, as most of them follow the same or similar format and can be safely matched in strings without the risk of removing actual string content. There are a few non-standard control codes that do not follow the traditional format (i.e. they end in numbers) thus forcing us to exclude them from the test because we cannot reliably match them.
|
||||
|
||||
On the historical side, those ECMA standards were established in the early 90's whereas the VT100, for example, was designed in the mid/late 70's. At that point in time, control codes were still pretty ungoverned and engineers used them for a multitude of things, namely to activate hardware ports that may have been proprietary. Somewhere else you see a similar 'anarchy' of codes is in the x86 architecture for processors; there are a ton of "interrupts" that can mean different things on certain brands of processors, most of which have been phased out.
|
||||
|
||||
## Maintainers
|
||||
|
||||
- [Sindre Sorhus](https://github.com/sindresorhus)
|
||||
- [Josh Junon](https://github.com/qix-)
|
||||
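The "Important" note above recommends a timeout (e.g. via super-regex) when matching untrusted input in a server context. A simpler alternative sketch, not the readme's recommendation: cap the input length before matching. `stripAnsiSafely` and `MAX_LENGTH` are illustrative names, not part of this package:

```js
import ansiRegex from 'ansi-regex';

// Arbitrary illustrative limit; tune to the expected input size.
const MAX_LENGTH = 10_000;

export function stripAnsiSafely(untrusted) {
  return String(untrusted).slice(0, MAX_LENGTH).replace(ansiRegex(), '');
}
```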
Jira_helper/node_modules/ansi-styles/index.d.ts (generated, vendored, new file, 236 lines)
@@ -0,0 +1,236 @@
|
||||
export type CSPair = { // eslint-disable-line @typescript-eslint/naming-convention
|
||||
/**
|
||||
The ANSI terminal control sequence for starting this style.
|
||||
*/
|
||||
readonly open: string;
|
||||
|
||||
/**
|
||||
The ANSI terminal control sequence for ending this style.
|
||||
*/
|
||||
readonly close: string;
|
||||
};
|
||||
|
||||
export type ColorBase = {
|
||||
/**
|
||||
The ANSI terminal control sequence for ending this color.
|
||||
*/
|
||||
readonly close: string;
|
||||
|
||||
ansi(code: number): string;
|
||||
|
||||
ansi256(code: number): string;
|
||||
|
||||
ansi16m(red: number, green: number, blue: number): string;
|
||||
};
|
||||
|
||||
export type Modifier = {
|
||||
/**
|
||||
Resets the current color chain.
|
||||
*/
|
||||
readonly reset: CSPair;
|
||||
|
||||
/**
|
||||
Make text bold.
|
||||
*/
|
||||
readonly bold: CSPair;
|
||||
|
||||
/**
|
||||
Emitting only a small amount of light.
|
||||
*/
|
||||
readonly dim: CSPair;
|
||||
|
||||
/**
|
||||
Make text italic. (Not widely supported)
|
||||
*/
|
||||
readonly italic: CSPair;
|
||||
|
||||
/**
|
||||
Make text underline. (Not widely supported)
|
||||
*/
|
||||
readonly underline: CSPair;
|
||||
|
||||
/**
|
||||
Make text overline.
|
||||
|
||||
Supported on VTE-based terminals, the GNOME terminal, mintty, and Git Bash.
|
||||
*/
|
||||
readonly overline: CSPair;
|
||||
|
||||
/**
|
||||
Inverse background and foreground colors.
|
||||
*/
|
||||
readonly inverse: CSPair;
|
||||
|
||||
/**
|
||||
Prints the text, but makes it invisible.
|
||||
*/
|
||||
readonly hidden: CSPair;
|
||||
|
||||
/**
|
||||
Puts a horizontal line through the center of the text. (Not widely supported)
|
||||
*/
|
||||
readonly strikethrough: CSPair;
|
||||
};
|
||||
|
||||
export type ForegroundColor = {
|
||||
readonly black: CSPair;
|
||||
readonly red: CSPair;
|
||||
readonly green: CSPair;
|
||||
readonly yellow: CSPair;
|
||||
readonly blue: CSPair;
|
||||
readonly cyan: CSPair;
|
||||
readonly magenta: CSPair;
|
||||
readonly white: CSPair;
|
||||
|
||||
/**
|
||||
Alias for `blackBright`.
|
||||
*/
|
||||
readonly gray: CSPair;
|
||||
|
||||
/**
|
||||
Alias for `blackBright`.
|
||||
*/
|
||||
readonly grey: CSPair;
|
||||
|
||||
readonly blackBright: CSPair;
|
||||
readonly redBright: CSPair;
|
||||
readonly greenBright: CSPair;
|
||||
readonly yellowBright: CSPair;
|
||||
readonly blueBright: CSPair;
|
||||
readonly cyanBright: CSPair;
|
||||
readonly magentaBright: CSPair;
|
||||
readonly whiteBright: CSPair;
|
||||
};
|
||||
|
||||
export type BackgroundColor = {
|
||||
readonly bgBlack: CSPair;
|
||||
readonly bgRed: CSPair;
|
||||
readonly bgGreen: CSPair;
|
||||
readonly bgYellow: CSPair;
|
||||
readonly bgBlue: CSPair;
|
||||
readonly bgCyan: CSPair;
|
||||
readonly bgMagenta: CSPair;
|
||||
readonly bgWhite: CSPair;
|
||||
|
||||
/**
|
||||
Alias for `bgBlackBright`.
|
||||
*/
|
||||
readonly bgGray: CSPair;
|
||||
|
||||
/**
|
||||
Alias for `bgBlackBright`.
|
||||
*/
|
||||
readonly bgGrey: CSPair;
|
||||
|
||||
readonly bgBlackBright: CSPair;
|
||||
readonly bgRedBright: CSPair;
|
||||
readonly bgGreenBright: CSPair;
|
||||
readonly bgYellowBright: CSPair;
|
||||
readonly bgBlueBright: CSPair;
|
||||
readonly bgCyanBright: CSPair;
|
||||
readonly bgMagentaBright: CSPair;
|
||||
readonly bgWhiteBright: CSPair;
|
||||
};
|
||||
|
||||
export type ConvertColor = {
|
||||
/**
|
||||
Convert from the RGB color space to the ANSI 256 color space.
|
||||
|
||||
@param red - (`0...255`)
|
||||
@param green - (`0...255`)
|
||||
@param blue - (`0...255`)
|
||||
*/
|
||||
rgbToAnsi256(red: number, green: number, blue: number): number;
|
||||
|
||||
/**
|
||||
Convert from the RGB HEX color space to the RGB color space.
|
||||
|
||||
@param hex - A hexadecimal string containing RGB data.
|
||||
*/
|
||||
hexToRgb(hex: string): [red: number, green: number, blue: number];
|
||||
|
||||
/**
|
||||
Convert from the RGB HEX color space to the ANSI 256 color space.
|
||||
|
||||
@param hex - A hexadecimal string containing RGB data.
|
||||
*/
|
||||
hexToAnsi256(hex: string): number;
|
||||
|
||||
/**
|
||||
Convert from the ANSI 256 color space to the ANSI 16 color space.
|
||||
|
||||
@param code - A number representing the ANSI 256 color.
|
||||
*/
|
||||
ansi256ToAnsi(code: number): number;
|
||||
|
||||
/**
|
||||
Convert from the RGB color space to the ANSI 16 color space.
|
||||
|
||||
@param red - (`0...255`)
|
||||
@param green - (`0...255`)
|
||||
@param blue - (`0...255`)
|
||||
*/
|
||||
rgbToAnsi(red: number, green: number, blue: number): number;
|
||||
|
||||
/**
|
||||
Convert from the RGB HEX color space to the ANSI 16 color space.
|
||||
|
||||
@param hex - A hexadecimal string containing RGB data.
|
||||
*/
|
||||
hexToAnsi(hex: string): number;
|
||||
};
|
||||
|
||||
/**
|
||||
Basic modifier names.
|
||||
*/
|
||||
export type ModifierName = keyof Modifier;
|
||||
|
||||
/**
|
||||
Basic foreground color names.
|
||||
|
||||
[More colors here.](https://github.com/chalk/chalk/blob/main/readme.md#256-and-truecolor-color-support)
|
||||
*/
|
||||
export type ForegroundColorName = keyof ForegroundColor;
|
||||
|
||||
/**
|
||||
Basic background color names.
|
||||
|
||||
[More colors here.](https://github.com/chalk/chalk/blob/main/readme.md#256-and-truecolor-color-support)
|
||||
*/
|
||||
export type BackgroundColorName = keyof BackgroundColor;
|
||||
|
||||
/**
|
||||
Basic color names. The combination of foreground and background color names.
|
||||
|
||||
[More colors here.](https://github.com/chalk/chalk/blob/main/readme.md#256-and-truecolor-color-support)
|
||||
*/
|
||||
export type ColorName = ForegroundColorName | BackgroundColorName;
|
||||
|
||||
/**
|
||||
Basic modifier names.
|
||||
*/
|
||||
export const modifierNames: readonly ModifierName[];
|
||||
|
||||
/**
|
||||
Basic foreground color names.
|
||||
*/
|
||||
export const foregroundColorNames: readonly ForegroundColorName[];
|
||||
|
||||
/**
|
||||
Basic background color names.
|
||||
*/
|
||||
export const backgroundColorNames: readonly BackgroundColorName[];
|
||||
|
||||
/*
|
||||
Basic color names. The combination of foreground and background color names.
|
||||
*/
|
||||
export const colorNames: readonly ColorName[];
|
||||
|
||||
declare const ansiStyles: {
|
||||
readonly modifier: Modifier;
|
||||
readonly color: ColorBase & ForegroundColor;
|
||||
readonly bgColor: ColorBase & BackgroundColor;
|
||||
readonly codes: ReadonlyMap<number, number>;
|
||||
} & ForegroundColor & BackgroundColor & Modifier & ConvertColor;
|
||||
|
||||
export default ansiStyles;
|
||||
Jira_helper/node_modules/ansi-styles/index.js (generated, vendored, new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
const ANSI_BACKGROUND_OFFSET = 10;
|
||||
|
||||
const wrapAnsi16 = (offset = 0) => code => `\u001B[${code + offset}m`;
|
||||
|
||||
const wrapAnsi256 = (offset = 0) => code => `\u001B[${38 + offset};5;${code}m`;
|
||||
|
||||
const wrapAnsi16m = (offset = 0) => (red, green, blue) => `\u001B[${38 + offset};2;${red};${green};${blue}m`;
|
||||
|
||||
const styles = {
|
||||
modifier: {
|
||||
reset: [0, 0],
|
||||
// 21 isn't widely supported and 22 does the same thing
|
||||
bold: [1, 22],
|
||||
dim: [2, 22],
|
||||
italic: [3, 23],
|
||||
underline: [4, 24],
|
||||
overline: [53, 55],
|
||||
inverse: [7, 27],
|
||||
hidden: [8, 28],
|
||||
strikethrough: [9, 29],
|
||||
},
|
||||
color: {
|
||||
black: [30, 39],
|
||||
red: [31, 39],
|
||||
green: [32, 39],
|
||||
yellow: [33, 39],
|
||||
blue: [34, 39],
|
||||
magenta: [35, 39],
|
||||
cyan: [36, 39],
|
||||
white: [37, 39],
|
||||
|
||||
// Bright color
|
||||
blackBright: [90, 39],
|
||||
gray: [90, 39], // Alias of `blackBright`
|
||||
grey: [90, 39], // Alias of `blackBright`
|
||||
redBright: [91, 39],
|
||||
greenBright: [92, 39],
|
||||
yellowBright: [93, 39],
|
||||
blueBright: [94, 39],
|
||||
magentaBright: [95, 39],
|
||||
cyanBright: [96, 39],
|
||||
whiteBright: [97, 39],
|
||||
},
|
||||
bgColor: {
|
||||
bgBlack: [40, 49],
|
||||
bgRed: [41, 49],
|
||||
bgGreen: [42, 49],
|
||||
bgYellow: [43, 49],
|
||||
bgBlue: [44, 49],
|
||||
bgMagenta: [45, 49],
|
||||
bgCyan: [46, 49],
|
||||
bgWhite: [47, 49],
|
||||
|
||||
// Bright color
|
||||
bgBlackBright: [100, 49],
|
||||
bgGray: [100, 49], // Alias of `bgBlackBright`
|
||||
bgGrey: [100, 49], // Alias of `bgBlackBright`
|
||||
bgRedBright: [101, 49],
|
||||
bgGreenBright: [102, 49],
|
||||
bgYellowBright: [103, 49],
|
||||
bgBlueBright: [104, 49],
|
||||
bgMagentaBright: [105, 49],
|
||||
bgCyanBright: [106, 49],
|
||||
bgWhiteBright: [107, 49],
|
||||
},
|
||||
};
|
||||
|
||||
export const modifierNames = Object.keys(styles.modifier);
|
||||
export const foregroundColorNames = Object.keys(styles.color);
|
||||
export const backgroundColorNames = Object.keys(styles.bgColor);
|
||||
export const colorNames = [...foregroundColorNames, ...backgroundColorNames];
|
||||
|
||||
function assembleStyles() {
|
||||
const codes = new Map();
|
||||
|
||||
for (const [groupName, group] of Object.entries(styles)) {
|
||||
for (const [styleName, style] of Object.entries(group)) {
|
||||
styles[styleName] = {
|
||||
open: `\u001B[${style[0]}m`,
|
||||
close: `\u001B[${style[1]}m`,
|
||||
};
|
||||
|
||||
group[styleName] = styles[styleName];
|
||||
|
||||
codes.set(style[0], style[1]);
|
||||
}
|
||||
|
||||
Object.defineProperty(styles, groupName, {
|
||||
value: group,
|
||||
enumerable: false,
|
||||
});
|
||||
}
|
||||
|
||||
Object.defineProperty(styles, 'codes', {
|
||||
value: codes,
|
||||
enumerable: false,
|
||||
});
|
||||
|
||||
styles.color.close = '\u001B[39m';
|
||||
styles.bgColor.close = '\u001B[49m';
|
||||
|
||||
styles.color.ansi = wrapAnsi16();
|
||||
styles.color.ansi256 = wrapAnsi256();
|
||||
styles.color.ansi16m = wrapAnsi16m();
|
||||
styles.bgColor.ansi = wrapAnsi16(ANSI_BACKGROUND_OFFSET);
|
||||
styles.bgColor.ansi256 = wrapAnsi256(ANSI_BACKGROUND_OFFSET);
|
||||
styles.bgColor.ansi16m = wrapAnsi16m(ANSI_BACKGROUND_OFFSET);
|
||||
|
||||
// From https://github.com/Qix-/color-convert/blob/3f0e0d4e92e235796ccb17f6e85c72094a651f49/conversions.js
|
||||
Object.defineProperties(styles, {
|
||||
rgbToAnsi256: {
|
||||
value(red, green, blue) {
|
||||
// We use the extended greyscale palette here, with the exception of
|
||||
// black and white. normal palette only has 4 greyscale shades.
|
||||
if (red === green && green === blue) {
|
||||
if (red < 8) {
|
||||
return 16;
|
||||
}
|
||||
|
||||
if (red > 248) {
|
||||
return 231;
|
||||
}
|
||||
|
||||
return Math.round(((red - 8) / 247) * 24) + 232;
|
||||
}
|
||||
|
||||
return 16
|
||||
+ (36 * Math.round(red / 255 * 5))
|
||||
+ (6 * Math.round(green / 255 * 5))
|
||||
+ Math.round(blue / 255 * 5);
|
||||
},
|
||||
enumerable: false,
|
||||
},
|
||||
hexToRgb: {
|
||||
value(hex) {
|
||||
const matches = /[a-f\d]{6}|[a-f\d]{3}/i.exec(hex.toString(16));
|
||||
if (!matches) {
|
||||
return [0, 0, 0];
|
||||
}
|
||||
|
||||
let [colorString] = matches;
|
||||
|
||||
if (colorString.length === 3) {
|
||||
colorString = [...colorString].map(character => character + character).join('');
|
||||
}
|
||||
|
||||
const integer = Number.parseInt(colorString, 16);
|
||||
|
||||
return [
|
||||
/* eslint-disable no-bitwise */
|
||||
(integer >> 16) & 0xFF,
|
||||
(integer >> 8) & 0xFF,
|
||||
integer & 0xFF,
|
||||
/* eslint-enable no-bitwise */
|
||||
];
|
||||
},
|
||||
enumerable: false,
|
||||
},
|
||||
hexToAnsi256: {
|
||||
value: hex => styles.rgbToAnsi256(...styles.hexToRgb(hex)),
|
||||
enumerable: false,
|
||||
},
|
||||
ansi256ToAnsi: {
|
||||
value(code) {
|
||||
if (code < 8) {
|
||||
return 30 + code;
|
||||
}
|
||||
|
||||
if (code < 16) {
|
||||
return 90 + (code - 8);
|
||||
}
|
||||
|
||||
let red;
|
||||
let green;
|
||||
let blue;
|
||||
|
||||
if (code >= 232) {
|
||||
red = (((code - 232) * 10) + 8) / 255;
|
||||
green = red;
|
||||
blue = red;
|
||||
} else {
|
||||
code -= 16;
|
||||
|
||||
const remainder = code % 36;
|
||||
|
||||
red = Math.floor(code / 36) / 5;
|
||||
green = Math.floor(remainder / 6) / 5;
|
||||
blue = (remainder % 6) / 5;
|
||||
}
|
||||
|
||||
const value = Math.max(red, green, blue) * 2;
|
||||
|
||||
if (value === 0) {
|
||||
return 30;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-bitwise
|
||||
let result = 30 + ((Math.round(blue) << 2) | (Math.round(green) << 1) | Math.round(red));
|
||||
|
||||
if (value === 2) {
|
||||
result += 60;
|
||||
}
|
||||
|
||||
return result;
|
||||
},
|
||||
enumerable: false,
|
||||
},
|
||||
rgbToAnsi: {
|
||||
value: (red, green, blue) => styles.ansi256ToAnsi(styles.rgbToAnsi256(red, green, blue)),
|
||||
enumerable: false,
|
||||
},
|
||||
hexToAnsi: {
|
||||
value: hex => styles.ansi256ToAnsi(styles.hexToAnsi256(hex)),
|
||||
enumerable: false,
|
||||
},
|
||||
});
|
||||
|
||||
return styles;
|
||||
}
|
||||
|
||||
const ansiStyles = assembleStyles();
|
||||
|
||||
export default ansiStyles;
|
||||
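A short worked example of the conversion helpers assembled above; the numbers follow directly from the formulas in `rgbToAnsi256` and `ansi256ToAnsi` (an illustrative sketch, not part of the vendored file):

```js
import styles from 'ansi-styles';

console.log(styles.hexToRgb('#ff0000'));     //=> [255, 0, 0]
console.log(styles.rgbToAnsi256(255, 0, 0)); //=> 196  (16 + 36*5 + 6*0 + 0)
console.log(styles.ansi256ToAnsi(196));      //=> 91   (bright red in the 16-color space)
console.log(styles.color.ansi256(196));      //=> '\u001B[38;5;196m'
```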
Jira_helper/node_modules/ansi-styles/license (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
MIT License

Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Jira_helper/node_modules/ansi-styles/package.json (generated, vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
{
|
||||
"name": "ansi-styles",
|
||||
"version": "6.2.3",
|
||||
"description": "ANSI escape codes for styling strings in the terminal",
|
||||
"license": "MIT",
|
||||
"repository": "chalk/ansi-styles",
|
||||
"funding": "https://github.com/chalk/ansi-styles?sponsor=1",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": "./index.js",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && ava && tsd",
|
||||
"screenshot": "svg-term --command='node screenshot' --out=screenshot.svg --padding=3 --width=55 --height=3 --at=1000 --no-cursor"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"keywords": [
|
||||
"ansi",
|
||||
"styles",
|
||||
"color",
|
||||
"colour",
|
||||
"colors",
|
||||
"terminal",
|
||||
"console",
|
||||
"cli",
|
||||
"string",
|
||||
"tty",
|
||||
"escape",
|
||||
"formatting",
|
||||
"rgb",
|
||||
"256",
|
||||
"shell",
|
||||
"xterm",
|
||||
"log",
|
||||
"logging",
|
||||
"command-line",
|
||||
"text"
|
||||
],
|
||||
"devDependencies": {
|
||||
"ava": "^6.1.3",
|
||||
"svg-term-cli": "^2.1.1",
|
||||
"tsd": "^0.31.1",
|
||||
"xo": "^0.58.0"
|
||||
}
|
||||
}
|
||||
Jira_helper/node_modules/ansi-styles/readme.md (generated, vendored, new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
# ansi-styles
|
||||
|
||||
> [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code#Colors_and_Styles) for styling strings in the terminal
|
||||
|
||||
You probably want the higher-level [chalk](https://github.com/chalk/chalk) module for styling your strings.
|
||||
|
||||

|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install ansi-styles
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import styles from 'ansi-styles';
|
||||
|
||||
console.log(`${styles.green.open}Hello world!${styles.green.close}`);
|
||||
|
||||
|
||||
// Color conversion between 256/truecolor
|
||||
// NOTE: When converting from truecolor to 256 colors, the original color
|
||||
// may be degraded to fit the new color palette. This means terminals
|
||||
// that do not support 16 million colors will best-match the
|
||||
// original color.
|
||||
console.log(`${styles.color.ansi(styles.rgbToAnsi(199, 20, 250))}Hello World${styles.color.close}`)
|
||||
console.log(`${styles.color.ansi256(styles.rgbToAnsi256(199, 20, 250))}Hello World${styles.color.close}`)
|
||||
console.log(`${styles.color.ansi16m(...styles.hexToRgb('#abcdef'))}Hello World${styles.color.close}`)
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
### `open` and `close`
|
||||
|
||||
Each style has an `open` and `close` property.
|
||||
|
||||
### `modifierNames`, `foregroundColorNames`, `backgroundColorNames`, and `colorNames`
|
||||
|
||||
All supported style strings are exposed as an array of strings for convenience. `colorNames` is the combination of `foregroundColorNames` and `backgroundColorNames`.
|
||||
|
||||
This can be useful if you need to validate input:
|
||||
|
||||
```js
|
||||
import {modifierNames, foregroundColorNames} from 'ansi-styles';
|
||||
|
||||
console.log(modifierNames.includes('bold'));
|
||||
//=> true
|
||||
|
||||
console.log(foregroundColorNames.includes('pink'));
|
||||
//=> false
|
||||
```
|
||||
|
||||
## Styles
|
||||
|
||||
### Modifiers
|
||||
|
||||
- `reset`
|
||||
- `bold`
|
||||
- `dim`
|
||||
- `italic` *(Not widely supported)*
|
||||
- `underline`
|
||||
- `overline` *Supported on VTE-based terminals, the GNOME terminal, mintty, and Git Bash.*
|
||||
- `inverse`
|
||||
- `hidden`
|
||||
- `strikethrough` *(Not widely supported)*
|
||||
|
||||
### Colors
|
||||
|
||||
- `black`
|
||||
- `red`
|
||||
- `green`
|
||||
- `yellow`
|
||||
- `blue`
|
||||
- `magenta`
|
||||
- `cyan`
|
||||
- `white`
|
||||
- `blackBright` (alias: `gray`, `grey`)
|
||||
- `redBright`
|
||||
- `greenBright`
|
||||
- `yellowBright`
|
||||
- `blueBright`
|
||||
- `magentaBright`
|
||||
- `cyanBright`
|
||||
- `whiteBright`
|
||||
|
||||
### Background colors
|
||||
|
||||
- `bgBlack`
|
||||
- `bgRed`
|
||||
- `bgGreen`
|
||||
- `bgYellow`
|
||||
- `bgBlue`
|
||||
- `bgMagenta`
|
||||
- `bgCyan`
|
||||
- `bgWhite`
|
||||
- `bgBlackBright` (alias: `bgGray`, `bgGrey`)
|
||||
- `bgRedBright`
|
||||
- `bgGreenBright`
|
||||
- `bgYellowBright`
|
||||
- `bgBlueBright`
|
||||
- `bgMagentaBright`
|
||||
- `bgCyanBright`
|
||||
- `bgWhiteBright`
|
||||
|
||||
## Advanced usage
|
||||
|
||||
By default, you get a map of styles, but the styles are also available as groups. They are non-enumerable so they don't show up unless you access them explicitly. This makes it easier to expose only a subset in a higher-level module.
|
||||
|
||||
- `styles.modifier`
|
||||
- `styles.color`
|
||||
- `styles.bgColor`
|
||||
|
||||
###### Example
|
||||
|
||||
```js
|
||||
import styles from 'ansi-styles';
|
||||
|
||||
console.log(styles.color.green.open);
|
||||
```
|
||||
|
||||
Raw escape codes (i.e. without the CSI escape prefix `\u001B[` and render mode postfix `m`) are available under `styles.codes`, which returns a `Map` with the open codes as keys and close codes as values.
|
||||
|
||||
###### Example
|
||||
|
||||
```js
|
||||
import styles from 'ansi-styles';
|
||||
|
||||
console.log(styles.codes.get(36));
|
||||
//=> 39
|
||||
```
|
||||
|
||||
## 16 / 256 / 16 million (TrueColor) support
|
||||
|
||||
`ansi-styles` allows converting between various color formats and ANSI escapes, with support for 16, 256 and [16 million colors](https://gist.github.com/XVilka/8346728).
|
||||
|
||||
The following color spaces are supported:
|
||||
|
||||
- `rgb`
|
||||
- `hex`
|
||||
- `ansi256`
|
||||
- `ansi`
|
||||
|
||||
To use these, call the associated conversion function with the intended output, for example:
|
||||
|
||||
```js
|
||||
import styles from 'ansi-styles';
|
||||
|
||||
styles.color.ansi(styles.rgbToAnsi(100, 200, 15)); // RGB to 16 color ansi foreground code
|
||||
styles.bgColor.ansi(styles.hexToAnsi('#C0FFEE')); // HEX to 16 color ansi foreground code
|
||||
|
||||
styles.color.ansi256(styles.rgbToAnsi256(100, 200, 15)); // RGB to 256 color ansi foreground code
|
||||
styles.bgColor.ansi256(styles.hexToAnsi256('#C0FFEE')); // HEX to 256 color ansi foreground code
|
||||
|
||||
styles.color.ansi16m(100, 200, 15); // RGB to 16 million color foreground code
|
||||
styles.bgColor.ansi16m(...styles.hexToRgb('#C0FFEE')); // Hex (RGB) to 16 million color foreground code
|
||||
```
|
||||
|
||||
## Related
|
||||
|
||||
- [ansi-escapes](https://github.com/sindresorhus/ansi-escapes) - ANSI escape codes for manipulating the terminal
|
||||
|
||||
## Maintainers
|
||||
|
||||
- [Sindre Sorhus](https://github.com/sindresorhus)
|
||||
- [Josh Junon](https://github.com/qix-)
|
||||
|
||||
## For enterprise
|
||||
|
||||
Available as part of the Tidelift Subscription.
|
||||
|
||||
The maintainers of `ansi-styles` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. [Learn more.](https://tidelift.com/subscription/pkg/npm-ansi-styles?utm_source=npm-ansi-styles&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)
|
||||
Jira_helper/node_modules/cliui/CHANGELOG.md (generated, vendored, new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
# Change Log
|
||||
|
||||
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
|
||||
|
||||
## [9.0.1](https://github.com/yargs/cliui/compare/v9.0.0...v9.0.1) (2025-03-17)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* make require("cliui") work as expected for CJS ([04ccc25](https://github.com/yargs/cliui/commit/04ccc250e30a059292c03fa1ef0a8661f8d93dfe))
|
||||
|
||||
## [9.0.0](https://github.com/yargs/cliui/compare/v8.0.1...v9.0.0) (2025-03-16)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* cliui is now ESM only ([#165](https://github.com/yargs/cliui/issues/165))
|
||||
|
||||
### Features
|
||||
|
||||
* cliui is now ESM only ([#165](https://github.com/yargs/cliui/issues/165)) ([5a521de](https://github.com/yargs/cliui/commit/5a521de7ea88f262236394c8d96775bcf50ff0a4))
|
||||
|
||||
## [8.0.1](https://github.com/yargs/cliui/compare/v8.0.0...v8.0.1) (2022-10-01)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** move rollup-plugin-ts to dev deps ([#124](https://github.com/yargs/cliui/issues/124)) ([7c8bd6b](https://github.com/yargs/cliui/commit/7c8bd6ba024d61e4eeae310c7959ab8ab6829081))
|
||||
|
||||
## [8.0.0](https://github.com/yargs/cliui/compare/v7.0.4...v8.0.0) (2022-09-30)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* **deps:** drop Node 10 to release CVE-2021-3807 patch (#122)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** drop Node 10 to release CVE-2021-3807 patch ([#122](https://github.com/yargs/cliui/issues/122)) ([f156571](https://github.com/yargs/cliui/commit/f156571ce4f2ebf313335e3a53ad905589da5a30))
|
||||
|
||||
### [7.0.4](https://www.github.com/yargs/cliui/compare/v7.0.3...v7.0.4) (2020-11-08)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deno:** import UIOptions from definitions ([#97](https://www.github.com/yargs/cliui/issues/97)) ([f04f343](https://www.github.com/yargs/cliui/commit/f04f3439bc78114c7e90f82ff56f5acf16268ea8))
|
||||
|
||||
### [7.0.3](https://www.github.com/yargs/cliui/compare/v7.0.2...v7.0.3) (2020-10-16)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **exports:** node 13.0 and 13.1 require the dotted object form _with_ a string fallback ([#93](https://www.github.com/yargs/cliui/issues/93)) ([eca16fc](https://www.github.com/yargs/cliui/commit/eca16fc05d26255df3280906c36d7f0e5b05c6e9))
|
||||
|
||||
### [7.0.2](https://www.github.com/yargs/cliui/compare/v7.0.1...v7.0.2) (2020-10-14)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **exports:** node 13.0-13.6 require a string fallback ([#91](https://www.github.com/yargs/cliui/issues/91)) ([b529d7e](https://www.github.com/yargs/cliui/commit/b529d7e432901af1af7848b23ed6cf634497d961))
|
||||
|
||||
### [7.0.1](https://www.github.com/yargs/cliui/compare/v7.0.0...v7.0.1) (2020-08-16)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **build:** main should be build/index.cjs ([dc29a3c](https://www.github.com/yargs/cliui/commit/dc29a3cc617a410aa850e06337b5954b04f2cb4d))
|
||||
|
||||
## [7.0.0](https://www.github.com/yargs/cliui/compare/v6.0.0...v7.0.0) (2020-08-16)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* tsc/ESM/Deno support (#82)
|
||||
* modernize deps and build (#80)
|
||||
|
||||
### Build System
|
||||
|
||||
* modernize deps and build ([#80](https://www.github.com/yargs/cliui/issues/80)) ([339d08d](https://www.github.com/yargs/cliui/commit/339d08dc71b15a3928aeab09042af94db2f43743))
|
||||
|
||||
|
||||
### Code Refactoring
|
||||
|
||||
* tsc/ESM/Deno support ([#82](https://www.github.com/yargs/cliui/issues/82)) ([4b777a5](https://www.github.com/yargs/cliui/commit/4b777a5fe01c5d8958c6708695d6aab7dbe5706c))
|
||||
|
||||
## [6.0.0](https://www.github.com/yargs/cliui/compare/v5.0.0...v6.0.0) (2019-11-10)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* update deps, drop Node 6
|
||||
|
||||
### Code Refactoring
|
||||
|
||||
* update deps, drop Node 6 ([62056df](https://www.github.com/yargs/cliui/commit/62056df))
|
||||
|
||||
## [5.0.0](https://github.com/yargs/cliui/compare/v4.1.0...v5.0.0) (2019-04-10)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Update wrap-ansi to fix compatibility with latest versions of chalk. ([#60](https://github.com/yargs/cliui/issues/60)) ([7bf79ae](https://github.com/yargs/cliui/commit/7bf79ae))
|
||||
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
* Drop support for node < 6.
|
||||
|
||||
|
||||
|
||||
<a name="4.1.0"></a>
|
||||
## [4.1.0](https://github.com/yargs/cliui/compare/v4.0.0...v4.1.0) (2018-04-23)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add resetOutput method ([#57](https://github.com/yargs/cliui/issues/57)) ([7246902](https://github.com/yargs/cliui/commit/7246902))
|
||||
|
||||
|
||||
|
||||
<a name="4.0.0"></a>
|
||||
## [4.0.0](https://github.com/yargs/cliui/compare/v3.2.0...v4.0.0) (2017-12-18)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* downgrades strip-ansi to version 3.0.1 ([#54](https://github.com/yargs/cliui/issues/54)) ([5764c46](https://github.com/yargs/cliui/commit/5764c46))
|
||||
* set env variable FORCE_COLOR. ([#56](https://github.com/yargs/cliui/issues/56)) ([7350e36](https://github.com/yargs/cliui/commit/7350e36))
|
||||
|
||||
|
||||
### Chores
|
||||
|
||||
* drop support for node < 4 ([#53](https://github.com/yargs/cliui/issues/53)) ([b105376](https://github.com/yargs/cliui/commit/b105376))
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add fallback for window width ([#45](https://github.com/yargs/cliui/issues/45)) ([d064922](https://github.com/yargs/cliui/commit/d064922))
|
||||
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
* officially drop support for Node < 4
|
||||
|
||||
|
||||
|
||||
<a name="3.2.0"></a>
|
||||
## [3.2.0](https://github.com/yargs/cliui/compare/v3.1.2...v3.2.0) (2016-04-11)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* reduces tarball size ([acc6c33](https://github.com/yargs/cliui/commit/acc6c33))
|
||||
|
||||
### Features
|
||||
|
||||
* adds standard-version for release management ([ff84e32](https://github.com/yargs/cliui/commit/ff84e32))
|
||||
Jira_helper/node_modules/cliui/LICENSE.txt (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
Copyright (c) 2015, Contributors

Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice
appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE
LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES
OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
Jira_helper/node_modules/cliui/README.md (generated, vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
# cliui
|
||||
|
||||

|
||||
[](https://www.npmjs.com/package/cliui)
|
||||
[](https://conventionalcommits.org)
|
||||

|
||||
|
||||
easily create complex multi-column command-line-interfaces.
|
||||
|
||||
## Example
|
||||
|
||||
```bash
|
||||
npm i cliui@latest chalk@latest
|
||||
```
|
||||
|
||||
```js
|
||||
const ui = require('cliui')()
|
||||
const {Chalk} = require('chalk');
|
||||
const chalk = new Chalk();
|
||||
|
||||
ui.div('Usage: $0 [command] [options]')
|
||||
|
||||
ui.div({
|
||||
text: 'Options:',
|
||||
padding: [2, 0, 1, 0]
|
||||
})
|
||||
|
||||
ui.div(
|
||||
{
|
||||
text: "-f, --file",
|
||||
width: 20,
|
||||
padding: [0, 4, 0, 4]
|
||||
},
|
||||
{
|
||||
text: "the file to load." +
|
||||
chalk.green("(if this description is long it wraps).")
|
||||
,
|
||||
width: 20
|
||||
},
|
||||
{
|
||||
text: chalk.red("[required]"),
|
||||
align: 'right'
|
||||
}
|
||||
)
|
||||
|
||||
console.log(ui.toString())
|
||||
```
|
||||
|
||||
## Deno/ESM Support
|
||||
|
||||
As of `v7` `cliui` supports [Deno](https://github.com/denoland/deno) and
|
||||
[ESM](https://nodejs.org/api/esm.html#esm_ecmascript_modules):
|
||||
|
||||
```typescript
|
||||
import cliui from "cliui";
|
||||
import chalk from "chalk";
|
||||
// Deno: import cliui from "https://deno.land/x/cliui/deno.ts";
|
||||
|
||||
const ui = cliui({})
|
||||
|
||||
ui.div('Usage: $0 [command] [options]')
|
||||
|
||||
ui.div({
|
||||
text: 'Options:',
|
||||
padding: [2, 0, 1, 0]
|
||||
})
|
||||
|
||||
ui.div(
|
||||
{
|
||||
text: "-f, --file",
|
||||
width: 20,
|
||||
padding: [0, 4, 0, 4]
|
||||
},
|
||||
{
|
||||
text: "the file to load." +
|
||||
chalk.green("(if this description is long it wraps).")
|
||||
,
|
||||
width: 20
|
||||
},
|
||||
{
|
||||
text: chalk.red("[required]"),
|
||||
align: 'right'
|
||||
}
|
||||
)
|
||||
|
||||
console.log(ui.toString())
|
||||
```
|
||||
|
||||
<img width="500" src="screenshot.png">
|
||||
|
||||
## Layout DSL
|
||||
|
||||
cliui exposes a simple layout DSL:
|
||||
|
||||
If you create a single `ui.div`, passing a string rather than an
|
||||
object:
|
||||
|
||||
* `\n`: characters will be interpreted as new rows.
|
||||
* `\t`: characters will be interpreted as new columns.
|
||||
* `\s`: characters will be interpreted as padding.
|
||||
|
||||
**as an example...**
|
||||
|
||||
```js
|
||||
var ui = require('./')({
|
||||
width: 60
|
||||
})
|
||||
|
||||
ui.div(
|
||||
'Usage: node ./bin/foo.js\n' +
|
||||
' <regex>\t provide a regex\n' +
|
||||
' <glob>\t provide a glob\t [required]'
|
||||
)
|
||||
|
||||
console.log(ui.toString())
|
||||
```
|
||||
|
||||
**will output:**
|
||||
|
||||
```shell
|
||||
Usage: node ./bin/foo.js
|
||||
<regex> provide a regex
|
||||
<glob> provide a glob [required]
|
||||
```
|
||||
|
||||
## Methods
|
||||
|
||||
```js
|
||||
cliui = require('cliui')
|
||||
```
|
||||
|
||||
### cliui({width: integer})
|
||||
|
||||
Specify the maximum width of the UI being generated.
|
||||
If no width is provided, cliui will try to get the current window's width and use it, and if that doesn't work, width will be set to `80`.
|
||||
|
||||
### cliui({wrap: boolean})
|
||||
|
||||
Enable or disable the wrapping of text in a column.
|
||||
|
||||
### cliui.div(column, column, column)
|
||||
|
||||
Create a row with any number of columns, a column
|
||||
can either be a string, or an object with the following
|
||||
options:
|
||||
|
||||
* **text:** some text to place in the column.
|
||||
* **width:** the width of a column.
|
||||
* **align:** alignment, `right` or `center`.
|
||||
* **padding:** `[top, right, bottom, left]`.
|
||||
* **border:** should a border be placed around the div?
|
||||
|
||||
### cliui.span(column, column, column)
|
||||
|
||||
Similar to `div`, except the next row will be appended without
|
||||
a new line being created.
|
||||
|
||||
### cliui.resetOutput()
|
||||
|
||||
Resets the UI elements of the current cliui instance, maintaining the values
|
||||
set for `width` and `wrap`.
|
||||
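A brief sketch combining the column options documented above (`width`, `align`, `padding`, `border`); the values are illustrative and the exact rendering depends on the configured width:

```js
import cliui from 'cliui';

const ui = cliui({width: 60});

ui.div(
  {text: 'name', width: 20, padding: [0, 2, 0, 2]},
  {text: 'value', align: 'right'}
);
ui.div({text: 'boxed note', border: true});

console.log(ui.toString());
```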
Jira_helper/node_modules/cliui/build/lib/index.js (generated, vendored, new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
'use strict';
|
||||
const align = {
|
||||
right: alignRight,
|
||||
center: alignCenter
|
||||
};
|
||||
const top = 0;
|
||||
const right = 1;
|
||||
const bottom = 2;
|
||||
const left = 3;
|
||||
export class UI {
|
||||
constructor(opts) {
|
||||
var _a;
|
||||
this.width = opts.width;
|
||||
this.wrap = (_a = opts.wrap) !== null && _a !== void 0 ? _a : true;
|
||||
this.rows = [];
|
||||
}
|
||||
span(...args) {
|
||||
const cols = this.div(...args);
|
||||
cols.span = true;
|
||||
}
|
||||
resetOutput() {
|
||||
this.rows = [];
|
||||
}
|
||||
div(...args) {
|
||||
if (args.length === 0) {
|
||||
this.div('');
|
||||
}
|
||||
if (this.wrap && this.shouldApplyLayoutDSL(...args) && typeof args[0] === 'string') {
|
||||
return this.applyLayoutDSL(args[0]);
|
||||
}
|
||||
const cols = args.map(arg => {
|
||||
if (typeof arg === 'string') {
|
||||
return this.colFromString(arg);
|
||||
}
|
||||
return arg;
|
||||
});
|
||||
this.rows.push(cols);
|
||||
return cols;
|
||||
}
|
||||
shouldApplyLayoutDSL(...args) {
|
||||
return args.length === 1 && typeof args[0] === 'string' &&
|
||||
/[\t\n]/.test(args[0]);
|
||||
}
|
||||
applyLayoutDSL(str) {
|
||||
const rows = str.split('\n').map(row => row.split('\t'));
|
||||
let leftColumnWidth = 0;
|
||||
// simple heuristic for layout, make sure the
|
||||
// second column lines up along the left-hand.
|
||||
// don't allow the first column to take up more
|
||||
// than 50% of the screen.
|
||||
rows.forEach(columns => {
|
||||
if (columns.length > 1 && mixin.stringWidth(columns[0]) > leftColumnWidth) {
|
||||
leftColumnWidth = Math.min(Math.floor(this.width * 0.5), mixin.stringWidth(columns[0]));
|
||||
}
|
||||
});
|
||||
// generate a table:
|
||||
// replacing ' ' with padding calculations.
|
||||
// using the algorithmically generated width.
|
||||
rows.forEach(columns => {
|
||||
this.div(...columns.map((r, i) => {
|
||||
return {
|
||||
text: r.trim(),
|
||||
padding: this.measurePadding(r),
|
||||
width: (i === 0 && columns.length > 1) ? leftColumnWidth : undefined
|
||||
};
|
||||
}));
|
||||
});
|
||||
return this.rows[this.rows.length - 1];
|
||||
}
|
||||
colFromString(text) {
|
||||
return {
|
||||
text,
|
||||
padding: this.measurePadding(text)
|
||||
};
|
||||
}
|
||||
measurePadding(str) {
|
||||
// measure padding without ansi escape codes
|
||||
const noAnsi = mixin.stripAnsi(str);
|
||||
return [0, noAnsi.match(/\s*$/)[0].length, 0, noAnsi.match(/^\s*/)[0].length];
|
||||
}
|
||||
toString() {
|
||||
const lines = [];
|
||||
this.rows.forEach(row => {
|
||||
this.rowToString(row, lines);
|
||||
});
|
||||
// don't display any lines with the
|
||||
// hidden flag set.
|
||||
return lines
|
||||
.filter(line => !line.hidden)
|
||||
.map(line => line.text)
|
||||
.join('\n');
|
||||
}
|
||||
rowToString(row, lines) {
|
||||
this.rasterize(row).forEach((rrow, r) => {
|
||||
let str = '';
|
||||
rrow.forEach((col, c) => {
|
||||
const { width } = row[c]; // the width with padding.
|
||||
const wrapWidth = this.negatePadding(row[c]); // the width without padding.
|
||||
let ts = col; // temporary string used during alignment/padding.
|
||||
if (wrapWidth > mixin.stringWidth(col)) {
|
||||
ts += ' '.repeat(wrapWidth - mixin.stringWidth(col));
|
||||
}
|
||||
// align the string within its column.
|
||||
if (row[c].align && row[c].align !== 'left' && this.wrap) {
|
||||
const fn = align[row[c].align];
|
||||
ts = fn(ts, wrapWidth);
|
||||
if (mixin.stringWidth(ts) < wrapWidth) {
|
||||
ts += ' '.repeat((width || 0) - mixin.stringWidth(ts) - 1);
|
||||
}
|
||||
}
|
||||
// apply border and padding to string.
|
||||
const padding = row[c].padding || [0, 0, 0, 0];
|
||||
if (padding[left]) {
|
||||
str += ' '.repeat(padding[left]);
|
||||
}
|
||||
str += addBorder(row[c], ts, '| ');
|
||||
str += ts;
|
||||
str += addBorder(row[c], ts, ' |');
|
||||
if (padding[right]) {
|
||||
str += ' '.repeat(padding[right]);
|
||||
}
|
||||
// if prior row is span, try to render the
|
||||
// current row on the prior line.
|
||||
if (r === 0 && lines.length > 0) {
|
||||
str = this.renderInline(str, lines[lines.length - 1]);
|
||||
}
|
||||
});
|
||||
// remove trailing whitespace.
|
||||
lines.push({
|
||||
text: str.replace(/ +$/, ''),
|
||||
span: row.span
|
||||
});
|
||||
});
|
||||
return lines;
|
||||
}
|
||||
// if the full 'source' can render in
|
||||
// the target line, do so.
|
||||
renderInline(source, previousLine) {
|
||||
const match = source.match(/^ */);
|
||||
const leadingWhitespace = match ? match[0].length : 0;
|
||||
const target = previousLine.text;
|
||||
const targetTextWidth = mixin.stringWidth(target.trimRight());
|
||||
if (!previousLine.span) {
|
||||
return source;
|
||||
}
|
||||
// if we're not applying wrapping logic,
|
||||
// just always append to the span.
|
||||
if (!this.wrap) {
|
||||
previousLine.hidden = true;
|
||||
return target + source;
|
||||
}
|
||||
if (leadingWhitespace < targetTextWidth) {
|
||||
return source;
|
||||
}
|
||||
previousLine.hidden = true;
|
||||
return target.trimRight() + ' '.repeat(leadingWhitespace - targetTextWidth) + source.trimLeft();
|
||||
}
|
||||
rasterize(row) {
|
||||
const rrows = [];
|
||||
const widths = this.columnWidths(row);
|
||||
let wrapped;
|
||||
// word wrap all columns, and create
|
||||
// a data-structure that is easy to rasterize.
|
||||
row.forEach((col, c) => {
|
||||
// leave room for left and right padding.
|
||||
col.width = widths[c];
|
||||
if (this.wrap) {
|
||||
wrapped = mixin.wrap(col.text, this.negatePadding(col), { hard: true }).split('\n');
|
||||
}
|
||||
else {
|
||||
wrapped = col.text.split('\n');
|
||||
}
|
||||
if (col.border) {
|
||||
wrapped.unshift('.' + '-'.repeat(this.negatePadding(col) + 2) + '.');
|
||||
wrapped.push("'" + '-'.repeat(this.negatePadding(col) + 2) + "'");
|
||||
}
|
||||
// add top and bottom padding.
|
||||
if (col.padding) {
|
||||
wrapped.unshift(...new Array(col.padding[top] || 0).fill(''));
|
||||
wrapped.push(...new Array(col.padding[bottom] || 0).fill(''));
|
||||
}
|
||||
wrapped.forEach((str, r) => {
|
||||
if (!rrows[r]) {
|
||||
rrows.push([]);
|
||||
}
|
||||
const rrow = rrows[r];
|
||||
for (let i = 0; i < c; i++) {
|
||||
if (rrow[i] === undefined) {
|
||||
rrow.push('');
|
||||
}
|
||||
}
|
||||
rrow.push(str);
|
||||
});
|
||||
});
|
||||
return rrows;
|
||||
}
|
||||
negatePadding(col) {
|
||||
let wrapWidth = col.width || 0;
|
||||
if (col.padding) {
|
||||
wrapWidth -= (col.padding[left] || 0) + (col.padding[right] || 0);
|
||||
}
|
||||
if (col.border) {
|
||||
wrapWidth -= 4;
|
||||
}
|
||||
return wrapWidth;
|
||||
}
|
||||
columnWidths(row) {
|
||||
if (!this.wrap) {
|
||||
return row.map(col => {
|
||||
return col.width || mixin.stringWidth(col.text);
|
||||
});
|
||||
}
|
||||
let unset = row.length;
|
||||
let remainingWidth = this.width;
|
||||
// column widths can be set in config.
|
||||
const widths = row.map(col => {
|
||||
if (col.width) {
|
||||
unset--;
|
||||
remainingWidth -= col.width;
|
||||
return col.width;
|
||||
}
|
||||
return undefined;
|
||||
});
|
||||
// any unset widths should be calculated.
|
||||
const unsetWidth = unset ? Math.floor(remainingWidth / unset) : 0;
|
||||
return widths.map((w, i) => {
|
||||
if (w === undefined) {
|
||||
return Math.max(unsetWidth, _minWidth(row[i]));
|
||||
}
|
||||
return w;
|
||||
});
|
||||
}
|
||||
}
|
||||
function addBorder(col, ts, style) {
|
||||
if (col.border) {
|
||||
if (/[.']-+[.']/.test(ts)) {
|
||||
return '';
|
||||
}
|
||||
if (ts.trim().length !== 0) {
|
||||
return style;
|
||||
}
|
||||
return ' ';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
// calculates the minimum width of
|
||||
// a column, based on padding preferences.
|
||||
function _minWidth(col) {
|
||||
const padding = col.padding || [];
|
||||
const minWidth = 1 + (padding[left] || 0) + (padding[right] || 0);
|
||||
if (col.border) {
|
||||
return minWidth + 4;
|
||||
}
|
||||
return minWidth;
|
||||
}
|
||||
function getWindowWidth() {
|
||||
/* c8 ignore next 5: depends on terminal */
|
||||
if (typeof process === 'object' && process.stdout && process.stdout.columns) {
|
||||
return process.stdout.columns;
|
||||
}
|
||||
return 80;
|
||||
}
|
||||
function alignRight(str, width) {
|
||||
str = str.trim();
|
||||
const strWidth = mixin.stringWidth(str);
|
||||
if (strWidth < width) {
|
||||
return ' '.repeat(width - strWidth) + str;
|
||||
}
|
||||
return str;
|
||||
}
|
||||
function alignCenter(str, width) {
|
||||
str = str.trim();
|
||||
const strWidth = mixin.stringWidth(str);
|
||||
/* c8 ignore next 3 */
|
||||
if (strWidth >= width) {
|
||||
return str;
|
||||
}
|
||||
return ' '.repeat((width - strWidth) >> 1) + str;
|
||||
}
|
||||
let mixin;
|
||||
export function cliui(opts, _mixin) {
|
||||
mixin = _mixin;
|
||||
return new UI({
|
||||
width: (opts === null || opts === void 0 ? void 0 : opts.width) || getWindowWidth(),
|
||||
wrap: opts === null || opts === void 0 ? void 0 : opts.wrap
|
||||
});
|
||||
}
|
||||
1
Jira_helper/node_modules/cliui/build/tsconfig.tsbuildinfo
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
15
Jira_helper/node_modules/cliui/index.mjs
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Bootstrap cliui with CommonJS dependencies:
|
||||
import { cliui } from './build/lib/index.js'
|
||||
import stringWidth from 'string-width'
|
||||
import stripAnsi from 'strip-ansi'
|
||||
import wrapAnsi from 'wrap-ansi'
|
||||
|
||||
export default function ui (opts) {
|
||||
return cliui(opts, {
|
||||
stringWidth,
|
||||
stripAnsi,
|
||||
wrap: wrapAnsi
|
||||
})
|
||||
}
|
||||
|
||||
export {ui as 'module.exports'};
|
||||
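The `index.mjs` shim above wires the CommonJS helpers (`string-width`, `strip-ansi`, `wrap-ansi`) into the ESM core as a mixin. For orientation, here is a minimal, hedged sketch of how the resulting default export is commonly consumed; the column text and layout values are illustrative and not part of this repository:

```js
// Hypothetical usage sketch for the cliui default export shown above.
// Assumes Node.js >= 20 and that 'cliui' resolves to this vendored package.
import cliui from 'cliui'

const ui = cliui({ width: 60 })

// div() lays out one row; each object is a column with optional padding/align.
ui.div(
  { text: 'Usage: jira-helper <command>', padding: [0, 0, 1, 0] },
  { text: '[options]', align: 'right' }
)

// toString() rasterizes the accumulated rows into a single printable string.
console.log(ui.toString())
```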
72
Jira_helper/node_modules/cliui/package.json
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
{
|
||||
"name": "cliui",
|
||||
"version": "9.0.1",
|
||||
"description": "easily create complex multi-column command-line-interfaces",
|
||||
"main": "build/index.mjs",
|
||||
"exports": {
|
||||
".": "./index.mjs"
|
||||
},
|
||||
"type": "module",
|
||||
"module": "./index.mjs",
|
||||
"scripts": {
|
||||
"check": "standardx '**/*.ts' && standardx '**/*.js'",
|
||||
"fix": "standardx --fix '**/*.ts' && standardx --fix '**/*.js'",
|
||||
"pretest": "rimraf build && tsc -p tsconfig.test.json",
|
||||
"test": "c8 mocha ./test/*.mjs",
|
||||
"postest": "check",
|
||||
"coverage": "c8 report --check-coverage",
|
||||
"precompile": "rimraf build",
|
||||
"compile": "tsc",
|
||||
"prepare": "npm run compile"
|
||||
},
|
||||
"repository": "yargs/cliui",
|
||||
"standard": {
|
||||
"ignore": [
|
||||
"**/example/**"
|
||||
],
|
||||
"globals": [
|
||||
"it"
|
||||
]
|
||||
},
|
||||
"keywords": [
|
||||
"cli",
|
||||
"command-line",
|
||||
"layout",
|
||||
"design",
|
||||
"console",
|
||||
"wrap",
|
||||
"table"
|
||||
],
|
||||
"author": "Ben Coe <ben@npmjs.com>",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"string-width": "^7.2.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"wrap-ansi": "^9.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.13.10",
|
||||
"@typescript-eslint/eslint-plugin": "^4.0.0",
|
||||
"@typescript-eslint/parser": "^4.0.0",
|
||||
"c8": "^10.1.3",
|
||||
"chai": "^5.2.0",
|
||||
"chalk": "^5.4.1",
|
||||
"cross-env": "^7.0.2",
|
||||
"eslint": "^7.6.0",
|
||||
"eslint-plugin-import": "^2.22.0",
|
||||
"eslint-plugin-n": "^14.0.0",
|
||||
"gts": "^6.0.2",
|
||||
"mocha": "^11.1.0",
|
||||
"rimraf": "^6.0.1",
|
||||
"standardx": "^7.0.0",
|
||||
"typescript": "^5.8.2"
|
||||
},
|
||||
"files": [
|
||||
"build",
|
||||
"index.mjs",
|
||||
"!*.d.ts"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
}
|
||||
}
|
||||
21
Jira_helper/node_modules/csv-parse/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2010 Adaltas
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
82
Jira_helper/node_modules/csv-parse/README.md
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
# CSV parser for Node.js and the web
|
||||
|
||||
[](https://github.com/adaltas/node-csv/actions)
|
||||
[](https://www.npmjs.com/package/csv-parse)
|
||||
[](https://www.npmjs.com/package/csv-parse)
|
||||
|
||||
The [`csv-parse` package](https://csv.js.org/parse/) is a parser converting CSV text input into arrays or objects. It is part of the [CSV project](https://csv.js.org/).
|
||||
|
||||
It implements the Node.js [`stream.Transform` API](http://nodejs.org/api/stream.html#stream_class_stream_transform). It also provides a simple callback-based API for convenience. It is both extremely easy to use and powerful. It was first released in 2010 and is used against big data sets by a large community.
|
||||
|
||||
## Documentation
|
||||
|
||||
- [Project homepage](https://csv.js.org/parse/)
|
||||
- [API](https://csv.js.org/parse/api/)
|
||||
- [Options](https://csv.js.org/parse/options/)
|
||||
- [Info properties](https://csv.js.org/parse/info/)
|
||||
- [Common errors](https://csv.js.org/parse/errors/)
|
||||
- [Examples](https://csv.js.org/project/examples/)
|
||||
|
||||
## Main features
|
||||
|
||||
- Flexible with lots of [options](https://csv.js.org/parse/options/)
|
||||
- Multiple [distributions](https://csv.js.org/parse/distributions/): Node.js, Web, ECMAScript modules and CommonJS
|
||||
- Follows the Node.js streaming API
|
||||
- Simplicity with the optional callback API
|
||||
- Supports delimiters, quotes, escape characters and comments
|
||||
- Line breaks discovery
|
||||
- Supports big datasets
|
||||
- Complete test coverage and lots of samples for inspiration
|
||||
- No external dependencies
|
||||
- Works nicely with the [csv-generate](https://csv.js.org/generate/), [stream-transform](https://csv.js.org/transform/) and [csv-stringify](https://csv.js.org/stringify/) packages
|
||||
- MIT License
|
||||
|
||||
## Usage
|
||||
|
||||
Run `npm install csv` to install the full CSV module or run `npm install csv-parse` if you are only interested in the CSV parser.
|
||||
|
||||
Use the callback and sync APIs for simplicity or the stream-based API for scalability.
|
||||
|
||||
## Example
|
||||
|
||||
The [API](https://csv.js.org/parse/api/) is available in multiple flavors. This example illustrates the stream API.
|
||||
|
||||
```js
|
||||
import assert from "assert";
|
||||
import { parse } from "csv-parse";
|
||||
|
||||
const records = [];
|
||||
// Initialize the parser
|
||||
const parser = parse({
|
||||
delimiter: ":",
|
||||
});
|
||||
// Use the readable stream api to consume records
|
||||
parser.on("readable", function () {
|
||||
let record;
|
||||
while ((record = parser.read()) !== null) {
|
||||
records.push(record);
|
||||
}
|
||||
});
|
||||
// Catch any error
|
||||
parser.on("error", function (err) {
|
||||
console.error(err.message);
|
||||
});
|
||||
// Test that the parsed records matched the expected records
|
||||
parser.on("end", function () {
|
||||
assert.deepStrictEqual(records, [
|
||||
["root", "x", "0", "0", "root", "/root", "/bin/bash"],
|
||||
["someone", "x", "1022", "1022", "", "/home/someone", "/bin/bash"],
|
||||
]);
|
||||
});
|
||||
// Write data to the stream
|
||||
parser.write("root:x:0:0:root:/root:/bin/bash\n");
|
||||
parser.write("someone:x:1022:1022::/home/someone:/bin/bash\n");
|
||||
// Close the readable stream
|
||||
parser.end();
|
||||
```
|
||||
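For the sync API mentioned above, a minimal sketch is shown below (assuming the `csv-parse/sync` entry point; the input line is illustrative):

```js
import assert from "assert";
import { parse } from "csv-parse/sync";

// The sync flavor returns every record at once instead of streaming them.
const records = parse("root:x:0:0:root:/root:/bin/bash\n", {
  delimiter: ":",
});
assert.deepStrictEqual(records, [
  ["root", "x", "0", "0", "root", "/root", "/bin/bash"],
]);
```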
|
||||
## Contributors
|
||||
|
||||
The project is sponsored by [Adaltas](https://www.adaltas.com), a Big Data consulting firm based in Paris, France.
|
||||
|
||||
- David Worms: <https://github.com/wdavidw>
|
||||
1898
Jira_helper/node_modules/csv-parse/dist/cjs/index.cjs
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
507
Jira_helper/node_modules/csv-parse/dist/cjs/index.d.cts
generated
vendored
Normal file
@@ -0,0 +1,507 @@
|
||||
// Original definitions in https://github.com/DefinitelyTyped/DefinitelyTyped by: David Muller <https://github.com/davidm77>
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import * as stream from "stream";
|
||||
|
||||
export type Callback<T = string[]> = (
|
||||
err: CsvError | undefined,
|
||||
records: T[],
|
||||
info?: Info,
|
||||
) => void;
|
||||
|
||||
// export interface Parser extends stream.Transform {}
|
||||
|
||||
// export class Parser<T> extends stream.Transform {
|
||||
export class Parser extends stream.Transform {
|
||||
constructor(options: Options);
|
||||
|
||||
// __push(line: T): CsvError | undefined;
|
||||
__push(line: any): CsvError | undefined;
|
||||
|
||||
// __write(chars: any, end: any, callback: any): any;
|
||||
|
||||
readonly options: OptionsNormalized;
|
||||
|
||||
readonly info: Info;
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
/**
|
||||
* Count the number of lines being fully commented.
|
||||
*/
|
||||
readonly comment_lines: number;
|
||||
/**
|
||||
* Count the number of processed empty lines.
|
||||
*/
|
||||
readonly empty_lines: number;
|
||||
/**
|
||||
* The number of lines encountered in the source dataset, start at 1 for the first line.
|
||||
*/
|
||||
readonly lines: number;
|
||||
/**
|
||||
* Count the number of processed records.
|
||||
*/
|
||||
readonly records: number;
|
||||
/**
|
||||
* Count of the number of processed bytes.
|
||||
*/
|
||||
readonly bytes: number;
|
||||
/**
|
||||
* Number of non uniform records when `relax_column_count` is true.
|
||||
*/
|
||||
readonly invalid_field_length: number;
|
||||
/**
|
||||
* Normalized version of `options.columns` when `options.columns` is true, boolean otherwise.
|
||||
*/
|
||||
readonly columns: boolean | { name: string }[] | { disabled: true }[];
|
||||
}
|
||||
|
||||
export interface CastingContext {
|
||||
readonly column: number | string;
|
||||
readonly empty_lines: number;
|
||||
readonly error: CsvError;
|
||||
readonly header: boolean;
|
||||
readonly index: number;
|
||||
readonly quoting: boolean;
|
||||
readonly lines: number;
|
||||
readonly raw: string | undefined;
|
||||
readonly records: number;
|
||||
readonly invalid_field_length: number;
|
||||
}
|
||||
|
||||
export type CastingFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => unknown;
|
||||
|
||||
export type CastingDateFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => Date;
|
||||
|
||||
export type ColumnOption<K = string> =
|
||||
| K
|
||||
| undefined
|
||||
| null
|
||||
| false
|
||||
| { name: K };
|
||||
|
||||
export interface OptionsNormalized<T = string[]> {
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, receive the value as first argument, a context as second argument and return a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, receive the value as argument and return a new value. It requires the "auto_parse" option. Be careful, it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* Internal property string the function to
|
||||
*/
|
||||
cast_first_line_to_header?: (
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* List of fields as an array, a user defined callback accepting the first
|
||||
* line and returning the column names or true if autodiscovered in the first
|
||||
* CSV line, default to null, affect the result data set in the sense that
|
||||
* records will be objects instead of arrays.
|
||||
*/
|
||||
columns:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment: string | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix: boolean;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter: Buffer[];
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding: BufferEncoding | null;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape: null | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from: number;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line: number;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns, the option require the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim: boolean;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname: number | string | undefined;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more: boolean;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes: boolean;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter: Buffer[];
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim: boolean;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines: boolean;
|
||||
/**
|
||||
* Skip a line with error found inside and directly go process the next line.
|
||||
*/
|
||||
skip_records_with_error: boolean;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*\/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values: boolean;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to: number;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line: number;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim: boolean;
|
||||
}
|
||||
|
||||
/*
|
||||
Note, could not `extends stream.TransformOptions` because encoding can be
|
||||
BufferEncoding and undefined as well as null which is not defined in the
|
||||
extended type.
|
||||
*/
|
||||
export interface Options<T = string[]> {
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to native types.
|
||||
* @deprecated Use {@link cast}
|
||||
*/
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
autoParse?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to dates. It requires the "auto_parse" option.
|
||||
* @deprecated Use {@link cast_date}
|
||||
*/
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
autoParseDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, receive the value as first argument, a context as second argument and return a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, receive the value as argument and return a new value. It requires the "auto_parse" option. Be careful, it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
castDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* List of fields as an array,
|
||||
* a user defined callback accepting the first line and returning the column names or true if autodiscovered in the first CSV line,
|
||||
* default to null,
|
||||
* affect the result data set in the sense that records will be objects instead of arrays.
|
||||
*/
|
||||
columns?:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]
|
||||
| ((
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]);
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name?: boolean;
|
||||
groupColumnsByName?: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment?: string | boolean | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix?: boolean | null;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter?: string | string[] | Buffer;
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding?: BufferEncoding | boolean | null | undefined;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape?: string | null | boolean | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from?: number | string;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line?: null | number | string;
|
||||
fromLine?: null | number | string;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns, the option require the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters?: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info?: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim?: boolean | null;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size?: number | null | string;
|
||||
maxRecordSize?: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname?: Buffer | null | number | string;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
onRecord?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
/**
|
||||
* Function called when an error occurs if the `skip_records_with_error`
|
||||
* option is activated.
|
||||
*/
|
||||
on_skip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
onSkip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: string | boolean | Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw?: boolean | null;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
recordDelimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count?: boolean | null;
|
||||
relaxColumnCount?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less?: boolean | null;
|
||||
relaxColumnCountLess?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more?: boolean | null;
|
||||
relaxColumnCountMore?: boolean | null;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes?: boolean | null;
|
||||
relaxQuotes?: boolean | null;
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim?: boolean | null;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines?: boolean | null;
|
||||
skipEmptyLines?: boolean | null;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*\/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values?: boolean | null;
|
||||
skipRecordsWithEmptyValues?: boolean | null;
|
||||
/**
|
||||
* Skip a line with error found inside and directly go process the next line.
|
||||
*/
|
||||
skip_records_with_error?: boolean | null;
|
||||
skipRecordsWithError?: boolean | null;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to?: null | number | string;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line?: null | number | string;
|
||||
toLine?: null | number | string;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim?: boolean | null;
|
||||
}
|
||||
|
||||
export type CsvErrorCode =
|
||||
| "CSV_INVALID_ARGUMENT"
|
||||
| "CSV_INVALID_CLOSING_QUOTE"
|
||||
| "CSV_INVALID_COLUMN_DEFINITION"
|
||||
| "CSV_INVALID_COLUMN_MAPPING"
|
||||
| "CSV_INVALID_OPTION_BOM"
|
||||
| "CSV_INVALID_OPTION_CAST"
|
||||
| "CSV_INVALID_OPTION_CAST_DATE"
|
||||
| "CSV_INVALID_OPTION_COLUMNS"
|
||||
| "CSV_INVALID_OPTION_COMMENT"
|
||||
| "CSV_INVALID_OPTION_DELIMITER"
|
||||
| "CSV_INVALID_OPTION_GROUP_COLUMNS_BY_NAME"
|
||||
| "CSV_INVALID_OPTION_ON_RECORD"
|
||||
| "CSV_MAX_RECORD_SIZE"
|
||||
| "CSV_NON_TRIMABLE_CHAR_AFTER_CLOSING_QUOTE"
|
||||
| "CSV_OPTION_COLUMNS_MISSING_NAME"
|
||||
| "CSV_QUOTE_NOT_CLOSED"
|
||||
| "CSV_RECORD_INCONSISTENT_FIELDS_LENGTH"
|
||||
| "CSV_RECORD_INCONSISTENT_COLUMNS"
|
||||
| "CSV_UNKNOWN_ERROR"
|
||||
| "INVALID_OPENING_QUOTE";
|
||||
|
||||
export class CsvError extends Error {
|
||||
readonly code: CsvErrorCode;
|
||||
[key: string]: any;
|
||||
|
||||
constructor(
|
||||
code: CsvErrorCode,
|
||||
message: string | string[],
|
||||
options?: OptionsNormalized,
|
||||
...contexts: unknown[]
|
||||
);
|
||||
}
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: Options,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(options: Options, callback?: Callback): Parser;
|
||||
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
declare function parse(callback?: Callback): Parser;
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
declare function normalize_options(opts: Options): OptionsNormalized;
|
||||
export { normalize_options };
|
||||
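The option names declared in this file map one-to-one onto the keys accepted by `parse()`. A hedged sketch tying a few of them together (the column names and input rows are illustrative only):

```js
import { parse } from "csv-parse";

// columns: true promotes the first line to object keys;
// cast runs on every raw field before the record is emitted.
const parser = parse(
  { columns: true, trim: true, cast: (value) => (value === "" ? null : value) },
  (err, records) => {
    if (err) throw err;
    console.log(records); // e.g. [ { id: '1', name: 'a' } ]
  },
);
parser.write("id,name\n1,a\n");
parser.end();
```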
1786
Jira_helper/node_modules/csv-parse/dist/cjs/sync.cjs
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
30
Jira_helper/node_modules/csv-parse/dist/cjs/sync.d.cts
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
import { Options } from "./index.cjs";
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
): T[];
|
||||
declare function parse(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: Options,
|
||||
): string[][];
|
||||
declare function parse(input: Buffer | string | Uint8Array): string[][];
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
export {
|
||||
CastingContext,
|
||||
CastingFunction,
|
||||
CastingDateFunction,
|
||||
ColumnOption,
|
||||
Options,
|
||||
OptionsNormalized,
|
||||
Info,
|
||||
CsvErrorCode,
|
||||
CsvError,
|
||||
} from "./index.cjs";
|
||||
507
Jira_helper/node_modules/csv-parse/dist/esm/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,507 @@
|
||||
// Original definitions in https://github.com/DefinitelyTyped/DefinitelyTyped by: David Muller <https://github.com/davidm77>
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import * as stream from "stream";
|
||||
|
||||
export type Callback<T = string[]> = (
|
||||
err: CsvError | undefined,
|
||||
records: T[],
|
||||
info?: Info,
|
||||
) => void;
|
||||
|
||||
// export interface Parser extends stream.Transform {}
|
||||
|
||||
// export class Parser<T> extends stream.Transform {
|
||||
export class Parser extends stream.Transform {
|
||||
constructor(options: Options);
|
||||
|
||||
// __push(line: T): CsvError | undefined;
|
||||
__push(line: any): CsvError | undefined;
|
||||
|
||||
// __write(chars: any, end: any, callback: any): any;
|
||||
|
||||
readonly options: OptionsNormalized;
|
||||
|
||||
readonly info: Info;
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
/**
|
||||
* Count the number of lines being fully commented.
|
||||
*/
|
||||
readonly comment_lines: number;
|
||||
/**
|
||||
* Count the number of processed empty lines.
|
||||
*/
|
||||
readonly empty_lines: number;
|
||||
/**
|
||||
* The number of lines encountered in the source dataset, start at 1 for the first line.
|
||||
*/
|
||||
readonly lines: number;
|
||||
/**
|
||||
* Count the number of processed records.
|
||||
*/
|
||||
readonly records: number;
|
||||
/**
|
||||
* Count of the number of processed bytes.
|
||||
*/
|
||||
readonly bytes: number;
|
||||
/**
|
||||
* Number of non uniform records when `relax_column_count` is true.
|
||||
*/
|
||||
readonly invalid_field_length: number;
|
||||
/**
|
||||
* Normalized version of `options.columns` when `options.columns` is true, boolean otherwise.
|
||||
*/
|
||||
readonly columns: boolean | { name: string }[] | { disabled: true }[];
|
||||
}
|
||||
|
||||
export interface CastingContext {
|
||||
readonly column: number | string;
|
||||
readonly empty_lines: number;
|
||||
readonly error: CsvError;
|
||||
readonly header: boolean;
|
||||
readonly index: number;
|
||||
readonly quoting: boolean;
|
||||
readonly lines: number;
|
||||
readonly raw: string | undefined;
|
||||
readonly records: number;
|
||||
readonly invalid_field_length: number;
|
||||
}
|
||||
|
||||
export type CastingFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => unknown;
|
||||
|
||||
export type CastingDateFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => Date;
|
||||
|
||||
export type ColumnOption<K = string> =
|
||||
| K
|
||||
| undefined
|
||||
| null
|
||||
| false
|
||||
| { name: K };
|
||||
|
||||
export interface OptionsNormalized<T = string[]> {
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, receive the value as first argument, a context as second argument and return a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, receive the value as argument and return a new value. It requires the "auto_parse" option. Be careful, it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* Internal property string the function to
|
||||
*/
|
||||
cast_first_line_to_header?: (
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* List of fields as an array, a user defined callback accepting the first
|
||||
* line and returning the column names or true if autodiscovered in the first
|
||||
* CSV line, default to null, affect the result data set in the sense that
|
||||
* records will be objects instead of arrays.
|
||||
*/
|
||||
columns:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment: string | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix: boolean;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter: Buffer[];
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding: BufferEncoding | null;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape: null | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from: number;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line: number;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns, the option require the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim: boolean;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname: number | string | undefined;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more: boolean;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes: boolean;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter: Buffer[];
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim: boolean;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines: boolean;
|
||||
/**
|
||||
* Skip a line with error found inside and directly go process the next line.
|
||||
*/
|
||||
skip_records_with_error: boolean;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*\/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values: boolean;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to: number;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line: number;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim: boolean;
|
||||
}
|
||||
|
||||
/*
|
||||
Note, could not `extends stream.TransformOptions` because encoding can be
|
||||
BufferEncoding and undefined as well as null which is not defined in the
|
||||
extended type.
|
||||
*/
|
||||
export interface Options<T = string[]> {
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to native types.
|
||||
* @deprecated Use {@link cast}
|
||||
*/
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
autoParse?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to dates. It requires the "auto_parse" option.
|
||||
* @deprecated Use {@link cast_date}
|
||||
*/
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
autoParseDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, receive the value as first argument, a context as second argument and return a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, receive the value as argument and return a new value. It requires the "auto_parse" option. Be careful, it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
castDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* List of fields as an array,
|
||||
* a user defined callback accepting the first line and returning the column names or true if autodiscovered in the first CSV line,
|
||||
* default to null,
|
||||
* affect the result data set in the sense that records will be objects instead of arrays.
|
||||
*/
|
||||
columns?:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]
|
||||
| ((
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]);
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name?: boolean;
|
||||
groupColumnsByName?: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment?: string | boolean | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix?: boolean | null;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter?: string | string[] | Buffer;
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding?: BufferEncoding | boolean | null | undefined;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape?: string | null | boolean | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from?: number | string;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line?: null | number | string;
|
||||
fromLine?: null | number | string;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns, the option require the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters?: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info?: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim?: boolean | null;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size?: number | null | string;
|
||||
maxRecordSize?: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname?: Buffer | null | number | string;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
onRecord?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
/**
|
||||
* Function called when an error occurs if the `skip_records_with_error`
|
||||
* option is activated.
|
||||
*/
|
||||
on_skip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
onSkip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: string | boolean | Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw?: boolean | null;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
recordDelimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count?: boolean | null;
|
||||
relaxColumnCount?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less?: boolean | null;
|
||||
relaxColumnCountLess?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more?: boolean | null;
|
||||
relaxColumnCountMore?: boolean | null;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes?: boolean | null;
|
||||
relaxQuotes?: boolean | null;
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim?: boolean | null;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines?: boolean | null;
|
||||
skipEmptyLines?: boolean | null;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*\/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values?: boolean | null;
|
||||
skipRecordsWithEmptyValues?: boolean | null;
|
||||
/**
|
||||
* Skip a line with error found inside and directly go process the next line.
|
||||
*/
|
||||
skip_records_with_error?: boolean | null;
|
||||
skipRecordsWithError?: boolean | null;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to?: null | number | string;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line?: null | number | string;
|
||||
toLine?: null | number | string;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim?: boolean | null;
|
||||
}
|
||||
|
||||
export type CsvErrorCode =
|
||||
| "CSV_INVALID_ARGUMENT"
|
||||
| "CSV_INVALID_CLOSING_QUOTE"
|
||||
| "CSV_INVALID_COLUMN_DEFINITION"
|
||||
| "CSV_INVALID_COLUMN_MAPPING"
|
||||
| "CSV_INVALID_OPTION_BOM"
|
||||
| "CSV_INVALID_OPTION_CAST"
|
||||
| "CSV_INVALID_OPTION_CAST_DATE"
|
||||
| "CSV_INVALID_OPTION_COLUMNS"
|
||||
| "CSV_INVALID_OPTION_COMMENT"
|
||||
| "CSV_INVALID_OPTION_DELIMITER"
|
||||
| "CSV_INVALID_OPTION_GROUP_COLUMNS_BY_NAME"
|
||||
| "CSV_INVALID_OPTION_ON_RECORD"
|
||||
| "CSV_MAX_RECORD_SIZE"
|
||||
| "CSV_NON_TRIMABLE_CHAR_AFTER_CLOSING_QUOTE"
|
||||
| "CSV_OPTION_COLUMNS_MISSING_NAME"
|
||||
| "CSV_QUOTE_NOT_CLOSED"
|
||||
| "CSV_RECORD_INCONSISTENT_FIELDS_LENGTH"
|
||||
| "CSV_RECORD_INCONSISTENT_COLUMNS"
|
||||
| "CSV_UNKNOWN_ERROR"
|
||||
| "INVALID_OPENING_QUOTE";
|
||||
|
||||
export class CsvError extends Error {
|
||||
readonly code: CsvErrorCode;
|
||||
[key: string]: any;
|
||||
|
||||
constructor(
|
||||
code: CsvErrorCode,
|
||||
message: string | string[],
|
||||
options?: OptionsNormalized,
|
||||
...contexts: unknown[]
|
||||
);
|
||||
}
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: Options,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(options: Options, callback?: Callback): Parser;
|
||||
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
declare function parse(callback?: Callback): Parser;
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
declare function normalize_options(opts: Options): OptionsNormalized;
|
||||
export { normalize_options };
|
||||
6936
Jira_helper/node_modules/csv-parse/dist/esm/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
17
Jira_helper/node_modules/csv-parse/dist/esm/stream.d.ts
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
import { Options } from "./index.js";
|
||||
|
||||
declare function parse(options?: Options): TransformStream;
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
export {
|
||||
CastingContext,
|
||||
CastingFunction,
|
||||
CastingDateFunction,
|
||||
ColumnOption,
|
||||
Options,
|
||||
OptionsNormalized,
|
||||
Info,
|
||||
CsvErrorCode,
|
||||
CsvError,
|
||||
} from "./index.js";
|
||||
30
Jira_helper/node_modules/csv-parse/dist/esm/sync.d.ts
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
import { Options } from "./index.js";
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
): T[];
|
||||
declare function parse(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: Options,
|
||||
): string[][];
|
||||
declare function parse(input: Buffer | string | Uint8Array): string[][];
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
export {
|
||||
CastingContext,
|
||||
CastingFunction,
|
||||
CastingDateFunction,
|
||||
ColumnOption,
|
||||
Options,
|
||||
OptionsNormalized,
|
||||
Info,
|
||||
CsvErrorCode,
|
||||
CsvError,
|
||||
} from "./index.js";
|
||||
3755
Jira_helper/node_modules/csv-parse/dist/esm/sync.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
6946
Jira_helper/node_modules/csv-parse/dist/iife/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3763
Jira_helper/node_modules/csv-parse/dist/iife/sync.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
6947
Jira_helper/node_modules/csv-parse/dist/umd/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3764
Jira_helper/node_modules/csv-parse/dist/umd/sync.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
Jira_helper/node_modules/csv-parse/lib/api/CsvError.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
class CsvError extends Error {
|
||||
constructor(code, message, options, ...contexts) {
|
||||
if (Array.isArray(message)) message = message.join(" ").trim();
|
||||
super(message);
|
||||
if (Error.captureStackTrace !== undefined) {
|
||||
Error.captureStackTrace(this, CsvError);
|
||||
}
|
||||
this.code = code;
|
||||
for (const context of contexts) {
|
||||
for (const key in context) {
|
||||
const value = context[key];
|
||||
this[key] = Buffer.isBuffer(value)
|
||||
? value.toString(options.encoding)
|
||||
: value == null
|
||||
? value
|
||||
: JSON.parse(JSON.stringify(value));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export { CsvError };
|
||||
916
Jira_helper/node_modules/csv-parse/lib/api/index.js
generated
vendored
Normal file
@@ -0,0 +1,916 @@
|
||||
import { normalize_columns_array } from "./normalize_columns_array.js";
|
||||
import { init_state } from "./init_state.js";
|
||||
import { normalize_options } from "./normalize_options.js";
|
||||
import { CsvError } from "./CsvError.js";
|
||||
|
||||
const isRecordEmpty = function (record) {
|
||||
return record.every(
|
||||
(field) =>
|
||||
field == null || (field.toString && field.toString().trim() === ""),
|
||||
);
|
||||
};
|
||||
|
||||
const cr = 13; // `\r`, carriage return, 0x0D in hexadecimal, 13 in decimal
|
||||
const nl = 10; // `\n`, newline, 0x0A in hexadecimal, 10 in decimal
|
||||
|
||||
const boms = {
|
||||
// Note, the following are equals:
|
||||
// Buffer.from("\ufeff")
|
||||
// Buffer.from([239, 187, 191])
|
||||
// Buffer.from('EFBBBF', 'hex')
|
||||
utf8: Buffer.from([239, 187, 191]),
|
||||
// Note, the following are equals:
|
||||
// Buffer.from("\ufeff", 'utf16le')
|
||||
// Buffer.from([255, 254])
|
||||
utf16le: Buffer.from([255, 254]),
|
||||
};
|
||||
|
||||
const transform = function (original_options = {}) {
|
||||
const info = {
|
||||
bytes: 0,
|
||||
comment_lines: 0,
|
||||
empty_lines: 0,
|
||||
invalid_field_length: 0,
|
||||
lines: 1,
|
||||
records: 0,
|
||||
};
|
||||
const options = normalize_options(original_options);
|
||||
return {
|
||||
info: info,
|
||||
original_options: original_options,
|
||||
options: options,
|
||||
state: init_state(options),
|
||||
__needMoreData: function (i, bufLen, end) {
|
||||
if (end) return false;
|
||||
const { encoding, escape, quote } = this.options;
|
||||
const { quoting, needMoreDataSize, recordDelimiterMaxLength } =
|
||||
this.state;
|
||||
const numOfCharLeft = bufLen - i - 1;
|
||||
const requiredLength = Math.max(
|
||||
needMoreDataSize,
|
||||
// Skip if the remaining buffer smaller than record delimiter
|
||||
// If "record_delimiter" is yet to be discovered:
|
||||
// 1. It is equal to `[]` and "recordDelimiterMaxLength" equals `0`
|
||||
// 2. We set the length to windows line ending in the current encoding
|
||||
// Note, that encoding is known from user or bom discovery at that point
|
||||
// recordDelimiterMaxLength,
|
||||
recordDelimiterMaxLength === 0
|
||||
? Buffer.from("\r\n", encoding).length
|
||||
: recordDelimiterMaxLength,
|
||||
// Skip if remaining buffer can be an escaped quote
|
||||
quoting ? (escape === null ? 0 : escape.length) + quote.length : 0,
|
||||
// Skip if remaining buffer can be record delimiter following the closing quote
|
||||
quoting ? quote.length + recordDelimiterMaxLength : 0,
|
||||
);
|
||||
return numOfCharLeft < requiredLength;
|
||||
},
|
||||
// Central parser implementation
|
||||
parse: function (nextBuf, end, push, close) {
|
||||
const {
|
||||
bom,
|
||||
comment_no_infix,
|
||||
encoding,
|
||||
from_line,
|
||||
ltrim,
|
||||
max_record_size,
|
||||
raw,
|
||||
relax_quotes,
|
||||
rtrim,
|
||||
skip_empty_lines,
|
||||
to,
|
||||
to_line,
|
||||
} = this.options;
|
||||
let { comment, escape, quote, record_delimiter } = this.options;
|
||||
const { bomSkipped, previousBuf, rawBuffer, escapeIsQuote } = this.state;
|
||||
let buf;
|
||||
if (previousBuf === undefined) {
|
||||
if (nextBuf === undefined) {
|
||||
// Handle empty string
|
||||
close();
|
||||
return;
|
||||
} else {
|
||||
buf = nextBuf;
|
||||
}
|
||||
} else if (previousBuf !== undefined && nextBuf === undefined) {
|
||||
buf = previousBuf;
|
||||
} else {
|
||||
buf = Buffer.concat([previousBuf, nextBuf]);
|
||||
}
|
||||
// Handle UTF BOM
|
||||
if (bomSkipped === false) {
|
||||
if (bom === false) {
|
||||
this.state.bomSkipped = true;
|
||||
} else if (buf.length < 3) {
|
||||
// Not enough data
|
||||
if (end === false) {
|
||||
// Wait for more data
|
||||
this.state.previousBuf = buf;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
for (const encoding in boms) {
|
||||
if (boms[encoding].compare(buf, 0, boms[encoding].length) === 0) {
|
||||
// Skip BOM
|
||||
const bomLength = boms[encoding].length;
|
||||
this.state.bufBytesStart += bomLength;
|
||||
buf = buf.slice(bomLength);
|
||||
// Renormalize original options with the new encoding
|
||||
const options = normalize_options({
|
||||
...this.original_options,
|
||||
encoding: encoding,
|
||||
});
|
||||
// Properties are merged with the existing options instance
|
||||
for (const key in options) {
|
||||
this.options[key] = options[key];
|
||||
}
|
||||
// Options will re-evaluate the Buffer with the new encoding
|
||||
({ comment, escape, quote } = this.options);
|
||||
break;
|
||||
}
|
||||
}
|
||||
this.state.bomSkipped = true;
|
||||
}
|
||||
}
|
||||
const bufLen = buf.length;
|
||||
let pos;
|
||||
for (pos = 0; pos < bufLen; pos++) {
|
||||
// Ensure we get enough space to look ahead
|
||||
// There should be a way to move this out of the loop
|
||||
if (this.__needMoreData(pos, bufLen, end)) {
|
||||
break;
|
||||
}
|
||||
if (this.state.wasRowDelimiter === true) {
|
||||
this.info.lines++;
|
||||
this.state.wasRowDelimiter = false;
|
||||
}
|
||||
if (to_line !== -1 && this.info.lines > to_line) {
|
||||
this.state.stop = true;
|
||||
close();
|
||||
return;
|
||||
}
|
||||
// Auto discovery of record_delimiter, unix, mac and windows supported
|
||||
if (this.state.quoting === false && record_delimiter.length === 0) {
|
||||
const record_delimiterCount = this.__autoDiscoverRecordDelimiter(
|
||||
buf,
|
||||
pos,
|
||||
);
|
||||
if (record_delimiterCount) {
|
||||
record_delimiter = this.options.record_delimiter;
|
||||
}
|
||||
}
|
||||
const chr = buf[pos];
|
||||
if (raw === true) {
|
||||
rawBuffer.append(chr);
|
||||
}
|
||||
if (
|
||||
(chr === cr || chr === nl) &&
|
||||
this.state.wasRowDelimiter === false
|
||||
) {
|
||||
this.state.wasRowDelimiter = true;
|
||||
}
|
||||
// Previous char was a valid escape char
|
||||
// treat the current char as a regular char
|
||||
if (this.state.escaping === true) {
|
||||
this.state.escaping = false;
|
||||
} else {
|
||||
// Escape is only active inside quoted fields
|
||||
// We are quoting, the char is an escape chr and there is a chr to escape
|
||||
// if(escape !== null && this.state.quoting === true && chr === escape && pos + 1 < bufLen){
|
||||
if (
|
||||
escape !== null &&
|
||||
this.state.quoting === true &&
|
||||
this.__isEscape(buf, pos, chr) &&
|
||||
pos + escape.length < bufLen
|
||||
) {
|
||||
if (escapeIsQuote) {
|
||||
if (this.__isQuote(buf, pos + escape.length)) {
|
||||
this.state.escaping = true;
|
||||
pos += escape.length - 1;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
this.state.escaping = true;
|
||||
pos += escape.length - 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// Not currently escaping and chr is a quote
|
||||
// TODO: need to compare bytes instead of single char
|
||||
if (this.state.commenting === false && this.__isQuote(buf, pos)) {
|
||||
if (this.state.quoting === true) {
|
||||
const nextChr = buf[pos + quote.length];
|
||||
const isNextChrTrimable =
|
||||
rtrim && this.__isCharTrimable(buf, pos + quote.length);
|
||||
const isNextChrComment =
|
||||
comment !== null &&
|
||||
this.__compareBytes(comment, buf, pos + quote.length, nextChr);
|
||||
const isNextChrDelimiter = this.__isDelimiter(
|
||||
buf,
|
||||
pos + quote.length,
|
||||
nextChr,
|
||||
);
|
||||
const isNextChrRecordDelimiter =
|
||||
record_delimiter.length === 0
|
||||
? this.__autoDiscoverRecordDelimiter(buf, pos + quote.length)
|
||||
: this.__isRecordDelimiter(nextChr, buf, pos + quote.length);
|
||||
// Escape a quote
|
||||
// Treat next char as a regular character
|
||||
if (
|
||||
escape !== null &&
|
||||
this.__isEscape(buf, pos, chr) &&
|
||||
this.__isQuote(buf, pos + escape.length)
|
||||
) {
|
||||
pos += escape.length - 1;
|
||||
} else if (
|
||||
!nextChr ||
|
||||
isNextChrDelimiter ||
|
||||
isNextChrRecordDelimiter ||
|
||||
isNextChrComment ||
|
||||
isNextChrTrimable
|
||||
) {
|
||||
this.state.quoting = false;
|
||||
this.state.wasQuoting = true;
|
||||
pos += quote.length - 1;
|
||||
continue;
|
||||
} else if (relax_quotes === false) {
|
||||
const err = this.__error(
|
||||
new CsvError(
|
||||
"CSV_INVALID_CLOSING_QUOTE",
|
||||
[
|
||||
"Invalid Closing Quote:",
|
||||
`got "${String.fromCharCode(nextChr)}"`,
|
||||
`at line ${this.info.lines}`,
|
||||
"instead of delimiter, record delimiter, trimable character",
|
||||
"(if activated) or comment",
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
),
|
||||
);
|
||||
if (err !== undefined) return err;
|
||||
} else {
|
||||
this.state.quoting = false;
|
||||
this.state.wasQuoting = true;
|
||||
this.state.field.prepend(quote);
|
||||
pos += quote.length - 1;
|
||||
}
|
||||
} else {
|
||||
if (this.state.field.length !== 0) {
|
||||
// In relax_quotes mode, treat opening quote preceded by chrs as regular
|
||||
if (relax_quotes === false) {
|
||||
const info = this.__infoField();
|
||||
const bom = Object.keys(boms)
|
||||
.map((b) =>
|
||||
boms[b].equals(this.state.field.toString()) ? b : false,
|
||||
)
|
||||
.filter(Boolean)[0];
|
||||
const err = this.__error(
|
||||
new CsvError(
|
||||
"INVALID_OPENING_QUOTE",
|
||||
[
|
||||
"Invalid Opening Quote:",
|
||||
`a quote is found on field ${JSON.stringify(info.column)} at line ${info.lines}, value is ${JSON.stringify(this.state.field.toString(encoding))}`,
|
||||
bom ? `(${bom} bom)` : undefined,
|
||||
],
|
||||
this.options,
|
||||
info,
|
||||
{
|
||||
field: this.state.field,
|
||||
},
|
||||
),
|
||||
);
|
||||
if (err !== undefined) return err;
|
||||
}
|
||||
} else {
|
||||
this.state.quoting = true;
|
||||
pos += quote.length - 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (this.state.quoting === false) {
|
||||
const recordDelimiterLength = this.__isRecordDelimiter(
|
||||
chr,
|
||||
buf,
|
||||
pos,
|
||||
);
|
||||
if (recordDelimiterLength !== 0) {
|
||||
// Do not emit comments which take a full line
|
||||
const skipCommentLine =
|
||||
this.state.commenting &&
|
||||
this.state.wasQuoting === false &&
|
||||
this.state.record.length === 0 &&
|
||||
this.state.field.length === 0;
|
||||
if (skipCommentLine) {
|
||||
this.info.comment_lines++;
|
||||
// Skip full comment line
|
||||
} else {
|
||||
// Activate record emission if above from_line
|
||||
if (
|
||||
this.state.enabled === false &&
|
||||
this.info.lines +
|
||||
(this.state.wasRowDelimiter === true ? 1 : 0) >=
|
||||
from_line
|
||||
) {
|
||||
this.state.enabled = true;
|
||||
this.__resetField();
|
||||
this.__resetRecord();
|
||||
pos += recordDelimiterLength - 1;
|
||||
continue;
|
||||
}
|
||||
// Skip if line is empty and skip_empty_lines activated
|
||||
if (
|
||||
skip_empty_lines === true &&
|
||||
this.state.wasQuoting === false &&
|
||||
this.state.record.length === 0 &&
|
||||
this.state.field.length === 0
|
||||
) {
|
||||
this.info.empty_lines++;
|
||||
pos += recordDelimiterLength - 1;
|
||||
continue;
|
||||
}
|
||||
this.info.bytes = this.state.bufBytesStart + pos;
|
||||
const errField = this.__onField();
|
||||
if (errField !== undefined) return errField;
|
||||
this.info.bytes =
|
||||
this.state.bufBytesStart + pos + recordDelimiterLength;
|
||||
const errRecord = this.__onRecord(push);
|
||||
if (errRecord !== undefined) return errRecord;
|
||||
if (to !== -1 && this.info.records >= to) {
|
||||
this.state.stop = true;
|
||||
close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
this.state.commenting = false;
|
||||
pos += recordDelimiterLength - 1;
|
||||
continue;
|
||||
}
|
||||
if (this.state.commenting) {
|
||||
continue;
|
||||
}
|
||||
if (
|
||||
comment !== null &&
|
||||
(comment_no_infix === false ||
|
||||
(this.state.record.length === 0 &&
|
||||
this.state.field.length === 0))
|
||||
) {
|
||||
const commentCount = this.__compareBytes(comment, buf, pos, chr);
|
||||
if (commentCount !== 0) {
|
||||
this.state.commenting = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
const delimiterLength = this.__isDelimiter(buf, pos, chr);
|
||||
if (delimiterLength !== 0) {
|
||||
this.info.bytes = this.state.bufBytesStart + pos;
|
||||
const errField = this.__onField();
|
||||
if (errField !== undefined) return errField;
|
||||
pos += delimiterLength - 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (this.state.commenting === false) {
|
||||
if (
|
||||
max_record_size !== 0 &&
|
||||
this.state.record_length + this.state.field.length > max_record_size
|
||||
) {
|
||||
return this.__error(
|
||||
new CsvError(
|
||||
"CSV_MAX_RECORD_SIZE",
|
||||
[
|
||||
"Max Record Size:",
|
||||
"record exceed the maximum number of tolerated bytes",
|
||||
`of ${max_record_size}`,
|
||||
`at line ${this.info.lines}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
const lappend =
|
||||
ltrim === false ||
|
||||
this.state.quoting === true ||
|
||||
this.state.field.length !== 0 ||
|
||||
!this.__isCharTrimable(buf, pos);
|
||||
// rtrim in non-quoting mode is handled in __onField
|
||||
const rappend = rtrim === false || this.state.wasQuoting === false;
|
||||
if (lappend === true && rappend === true) {
|
||||
this.state.field.append(chr);
|
||||
} else if (rtrim === true && !this.__isCharTrimable(buf, pos)) {
|
||||
return this.__error(
|
||||
new CsvError(
|
||||
"CSV_NON_TRIMABLE_CHAR_AFTER_CLOSING_QUOTE",
|
||||
[
|
||||
"Invalid Closing Quote:",
|
||||
"found non trimable byte after quote",
|
||||
`at line ${this.info.lines}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
),
|
||||
);
|
||||
} else {
|
||||
if (lappend === false) {
|
||||
pos += this.__isCharTrimable(buf, pos) - 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (end === true) {
|
||||
// Ensure we are not ending in a quoting state
|
||||
if (this.state.quoting === true) {
|
||||
const err = this.__error(
|
||||
new CsvError(
|
||||
"CSV_QUOTE_NOT_CLOSED",
|
||||
[
|
||||
"Quote Not Closed:",
|
||||
`the parsing is finished with an opening quote at line ${this.info.lines}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
),
|
||||
);
|
||||
if (err !== undefined) return err;
|
||||
} else {
|
||||
// Skip last line if it has no characters
|
||||
if (
|
||||
this.state.wasQuoting === true ||
|
||||
this.state.record.length !== 0 ||
|
||||
this.state.field.length !== 0
|
||||
) {
|
||||
this.info.bytes = this.state.bufBytesStart + pos;
|
||||
const errField = this.__onField();
|
||||
if (errField !== undefined) return errField;
|
||||
const errRecord = this.__onRecord(push);
|
||||
if (errRecord !== undefined) return errRecord;
|
||||
} else if (this.state.wasRowDelimiter === true) {
|
||||
this.info.empty_lines++;
|
||||
} else if (this.state.commenting === true) {
|
||||
this.info.comment_lines++;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
this.state.bufBytesStart += pos;
|
||||
this.state.previousBuf = buf.slice(pos);
|
||||
}
|
||||
if (this.state.wasRowDelimiter === true) {
|
||||
this.info.lines++;
|
||||
this.state.wasRowDelimiter = false;
|
||||
}
|
||||
},
|
||||
__onRecord: function (push) {
|
||||
const {
|
||||
columns,
|
||||
group_columns_by_name,
|
||||
encoding,
|
||||
info,
|
||||
from,
|
||||
relax_column_count,
|
||||
relax_column_count_less,
|
||||
relax_column_count_more,
|
||||
raw,
|
||||
skip_records_with_empty_values,
|
||||
} = this.options;
|
||||
const { enabled, record } = this.state;
|
||||
if (enabled === false) {
|
||||
return this.__resetRecord();
|
||||
}
|
||||
// Convert the first line into column names
|
||||
const recordLength = record.length;
|
||||
if (columns === true) {
|
||||
if (skip_records_with_empty_values === true && isRecordEmpty(record)) {
|
||||
this.__resetRecord();
|
||||
return;
|
||||
}
|
||||
return this.__firstLineToColumns(record);
|
||||
}
|
||||
if (columns === false && this.info.records === 0) {
|
||||
this.state.expectedRecordLength = recordLength;
|
||||
}
|
||||
if (recordLength !== this.state.expectedRecordLength) {
|
||||
const err =
|
||||
columns === false
|
||||
? new CsvError(
|
||||
"CSV_RECORD_INCONSISTENT_FIELDS_LENGTH",
|
||||
[
|
||||
"Invalid Record Length:",
|
||||
`expect ${this.state.expectedRecordLength},`,
|
||||
`got ${recordLength} on line ${this.info.lines}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
{
|
||||
record: record,
|
||||
},
|
||||
)
|
||||
: new CsvError(
|
||||
"CSV_RECORD_INCONSISTENT_COLUMNS",
|
||||
[
|
||||
"Invalid Record Length:",
|
||||
`columns length is ${columns.length},`, // rename columns
|
||||
`got ${recordLength} on line ${this.info.lines}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
{
|
||||
record: record,
|
||||
},
|
||||
);
|
||||
if (
|
||||
relax_column_count === true ||
|
||||
(relax_column_count_less === true &&
|
||||
recordLength < this.state.expectedRecordLength) ||
|
||||
(relax_column_count_more === true &&
|
||||
recordLength > this.state.expectedRecordLength)
|
||||
) {
|
||||
this.info.invalid_field_length++;
|
||||
this.state.error = err;
|
||||
// Error is undefined with skip_records_with_error
|
||||
} else {
|
||||
const finalErr = this.__error(err);
|
||||
if (finalErr) return finalErr;
|
||||
}
|
||||
}
|
||||
if (skip_records_with_empty_values === true && isRecordEmpty(record)) {
|
||||
this.__resetRecord();
|
||||
return;
|
||||
}
|
||||
if (this.state.recordHasError === true) {
|
||||
this.__resetRecord();
|
||||
this.state.recordHasError = false;
|
||||
return;
|
||||
}
|
||||
this.info.records++;
|
||||
if (from === 1 || this.info.records >= from) {
|
||||
const { objname } = this.options;
|
||||
// With columns, records are objects
|
||||
if (columns !== false) {
|
||||
const obj = {};
|
||||
// Transform record array to an object
|
||||
for (let i = 0, l = record.length; i < l; i++) {
|
||||
if (columns[i] === undefined || columns[i].disabled) continue;
|
||||
// Turn duplicate columns into an array
|
||||
if (
|
||||
group_columns_by_name === true &&
|
||||
obj[columns[i].name] !== undefined
|
||||
) {
|
||||
if (Array.isArray(obj[columns[i].name])) {
|
||||
obj[columns[i].name] = obj[columns[i].name].concat(record[i]);
|
||||
} else {
|
||||
obj[columns[i].name] = [obj[columns[i].name], record[i]];
|
||||
}
|
||||
} else {
|
||||
obj[columns[i].name] = record[i];
|
||||
}
|
||||
}
|
||||
// Without objname (default)
|
||||
if (raw === true || info === true) {
|
||||
const extRecord = Object.assign(
|
||||
{ record: obj },
|
||||
raw === true
|
||||
? { raw: this.state.rawBuffer.toString(encoding) }
|
||||
: {},
|
||||
info === true ? { info: this.__infoRecord() } : {},
|
||||
);
|
||||
const err = this.__push(
|
||||
objname === undefined ? extRecord : [obj[objname], extRecord],
|
||||
push,
|
||||
);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
} else {
|
||||
const err = this.__push(
|
||||
objname === undefined ? obj : [obj[objname], obj],
|
||||
push,
|
||||
);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
// Without columns, records are arrays
|
||||
} else {
|
||||
if (raw === true || info === true) {
|
||||
const extRecord = Object.assign(
|
||||
{ record: record },
|
||||
raw === true
|
||||
? { raw: this.state.rawBuffer.toString(encoding) }
|
||||
: {},
|
||||
info === true ? { info: this.__infoRecord() } : {},
|
||||
);
|
||||
const err = this.__push(
|
||||
objname === undefined ? extRecord : [record[objname], extRecord],
|
||||
push,
|
||||
);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
} else {
|
||||
const err = this.__push(
|
||||
objname === undefined ? record : [record[objname], record],
|
||||
push,
|
||||
);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
this.__resetRecord();
|
||||
},
|
||||
__firstLineToColumns: function (record) {
|
||||
const { firstLineToHeaders } = this.state;
|
||||
try {
|
||||
const headers =
|
||||
firstLineToHeaders === undefined
|
||||
? record
|
||||
: firstLineToHeaders.call(null, record);
|
||||
if (!Array.isArray(headers)) {
|
||||
return this.__error(
|
||||
new CsvError(
|
||||
"CSV_INVALID_COLUMN_MAPPING",
|
||||
[
|
||||
"Invalid Column Mapping:",
|
||||
"expect an array from column function,",
|
||||
`got ${JSON.stringify(headers)}`,
|
||||
],
|
||||
this.options,
|
||||
this.__infoField(),
|
||||
{
|
||||
headers: headers,
|
||||
},
|
||||
),
|
||||
);
|
||||
}
|
||||
const normalizedHeaders = normalize_columns_array(headers);
|
||||
this.state.expectedRecordLength = normalizedHeaders.length;
|
||||
this.options.columns = normalizedHeaders;
|
||||
this.__resetRecord();
|
||||
return;
|
||||
} catch (err) {
|
||||
return err;
|
||||
}
|
||||
},
|
||||
__resetRecord: function () {
|
||||
if (this.options.raw === true) {
|
||||
this.state.rawBuffer.reset();
|
||||
}
|
||||
this.state.error = undefined;
|
||||
this.state.record = [];
|
||||
this.state.record_length = 0;
|
||||
},
|
||||
__onField: function () {
|
||||
const { cast, encoding, rtrim, max_record_size } = this.options;
|
||||
const { enabled, wasQuoting } = this.state;
|
||||
// Short circuit for the from_line options
|
||||
if (enabled === false) {
|
||||
return this.__resetField();
|
||||
}
|
||||
let field = this.state.field.toString(encoding);
|
||||
if (rtrim === true && wasQuoting === false) {
|
||||
field = field.trimRight();
|
||||
}
|
||||
if (cast === true) {
|
||||
const [err, f] = this.__cast(field);
|
||||
if (err !== undefined) return err;
|
||||
field = f;
|
||||
}
|
||||
this.state.record.push(field);
|
||||
// Increment record length if record size must not exceed a limit
|
||||
if (max_record_size !== 0 && typeof field === "string") {
|
||||
this.state.record_length += field.length;
|
||||
}
|
||||
this.__resetField();
|
||||
},
|
||||
__resetField: function () {
|
||||
this.state.field.reset();
|
||||
this.state.wasQuoting = false;
|
||||
},
|
||||
__push: function (record, push) {
|
||||
const { on_record } = this.options;
|
||||
if (on_record !== undefined) {
|
||||
const info = this.__infoRecord();
|
||||
try {
|
||||
record = on_record.call(null, record, info);
|
||||
} catch (err) {
|
||||
return err;
|
||||
}
|
||||
if (record === undefined || record === null) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
push(record);
|
||||
},
|
||||
// Return a tuple with the error and the casted value
|
||||
__cast: function (field) {
|
||||
const { columns, relax_column_count } = this.options;
|
||||
const isColumns = Array.isArray(columns);
|
||||
// Don't lose time calling cast
// because the final record is an object
// and this field can't be associated with a key present in columns
|
||||
if (
|
||||
isColumns === true &&
|
||||
relax_column_count &&
|
||||
this.options.columns.length <= this.state.record.length
|
||||
) {
|
||||
return [undefined, undefined];
|
||||
}
|
||||
if (this.state.castField !== null) {
|
||||
try {
|
||||
const info = this.__infoField();
|
||||
return [undefined, this.state.castField.call(null, field, info)];
|
||||
} catch (err) {
|
||||
return [err];
|
||||
}
|
||||
}
|
||||
if (this.__isFloat(field)) {
|
||||
return [undefined, parseFloat(field)];
|
||||
} else if (this.options.cast_date !== false) {
|
||||
const info = this.__infoField();
|
||||
return [undefined, this.options.cast_date.call(null, field, info)];
|
||||
}
|
||||
return [undefined, field];
|
||||
},
|
||||
// Helper to test if a character is a space or a line delimiter
|
||||
__isCharTrimable: function (buf, pos) {
|
||||
const isTrim = (buf, pos) => {
|
||||
const { timchars } = this.state;
|
||||
loop1: for (let i = 0; i < timchars.length; i++) {
|
||||
const timchar = timchars[i];
|
||||
for (let j = 0; j < timchar.length; j++) {
|
||||
if (timchar[j] !== buf[pos + j]) continue loop1;
|
||||
}
|
||||
return timchar.length;
|
||||
}
|
||||
return 0;
|
||||
};
|
||||
return isTrim(buf, pos);
|
||||
},
|
||||
// Keep it in case we implement the `cast_int` option
|
||||
// __isInt(value){
|
||||
// // return Number.isInteger(parseInt(value))
|
||||
// // return !isNaN( parseInt( obj ) );
|
||||
// return /^(\-|\+)?[1-9][0-9]*$/.test(value)
|
||||
// }
|
||||
__isFloat: function (value) {
|
||||
return value - parseFloat(value) + 1 >= 0; // Borrowed from jquery
|
||||
},
|
||||
__compareBytes: function (sourceBuf, targetBuf, targetPos, firstByte) {
|
||||
if (sourceBuf[0] !== firstByte) return 0;
|
||||
const sourceLength = sourceBuf.length;
|
||||
for (let i = 1; i < sourceLength; i++) {
|
||||
if (sourceBuf[i] !== targetBuf[targetPos + i]) return 0;
|
||||
}
|
||||
return sourceLength;
|
||||
},
|
||||
__isDelimiter: function (buf, pos, chr) {
|
||||
const { delimiter, ignore_last_delimiters } = this.options;
|
||||
if (
|
||||
ignore_last_delimiters === true &&
|
||||
this.state.record.length === this.options.columns.length - 1
|
||||
) {
|
||||
return 0;
|
||||
} else if (
|
||||
ignore_last_delimiters !== false &&
|
||||
typeof ignore_last_delimiters === "number" &&
|
||||
this.state.record.length === ignore_last_delimiters - 1
|
||||
) {
|
||||
return 0;
|
||||
}
|
||||
loop1: for (let i = 0; i < delimiter.length; i++) {
|
||||
const del = delimiter[i];
|
||||
if (del[0] === chr) {
|
||||
for (let j = 1; j < del.length; j++) {
|
||||
if (del[j] !== buf[pos + j]) continue loop1;
|
||||
}
|
||||
return del.length;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
},
|
||||
__isRecordDelimiter: function (chr, buf, pos) {
|
||||
const { record_delimiter } = this.options;
|
||||
const recordDelimiterLength = record_delimiter.length;
|
||||
loop1: for (let i = 0; i < recordDelimiterLength; i++) {
|
||||
const rd = record_delimiter[i];
|
||||
const rdLength = rd.length;
|
||||
if (rd[0] !== chr) {
|
||||
continue;
|
||||
}
|
||||
for (let j = 1; j < rdLength; j++) {
|
||||
if (rd[j] !== buf[pos + j]) {
|
||||
continue loop1;
|
||||
}
|
||||
}
|
||||
return rd.length;
|
||||
}
|
||||
return 0;
|
||||
},
|
||||
__isEscape: function (buf, pos, chr) {
|
||||
const { escape } = this.options;
|
||||
if (escape === null) return false;
|
||||
const l = escape.length;
|
||||
if (escape[0] === chr) {
|
||||
for (let i = 0; i < l; i++) {
|
||||
if (escape[i] !== buf[pos + i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
__isQuote: function (buf, pos) {
|
||||
const { quote } = this.options;
|
||||
if (quote === null) return false;
|
||||
const l = quote.length;
|
||||
for (let i = 0; i < l; i++) {
|
||||
if (quote[i] !== buf[pos + i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
},
|
||||
__autoDiscoverRecordDelimiter: function (buf, pos) {
|
||||
const { encoding } = this.options;
|
||||
// Note, we don't need to cache this information in state,
|
||||
// It is only called on the first line until we find out a suitable
|
||||
// record delimiter.
|
||||
const rds = [
|
||||
// Important, the Windows line ending must come before the Mac OS 9 one
|
||||
Buffer.from("\r\n", encoding),
|
||||
Buffer.from("\n", encoding),
|
||||
Buffer.from("\r", encoding),
|
||||
];
|
||||
loop: for (let i = 0; i < rds.length; i++) {
|
||||
const l = rds[i].length;
|
||||
for (let j = 0; j < l; j++) {
|
||||
if (rds[i][j] !== buf[pos + j]) {
|
||||
continue loop;
|
||||
}
|
||||
}
|
||||
this.options.record_delimiter.push(rds[i]);
|
||||
this.state.recordDelimiterMaxLength = rds[i].length;
|
||||
return rds[i].length;
|
||||
}
|
||||
return 0;
|
||||
},
|
||||
__error: function (msg) {
|
||||
const { encoding, raw, skip_records_with_error } = this.options;
|
||||
const err = typeof msg === "string" ? new Error(msg) : msg;
|
||||
if (skip_records_with_error) {
|
||||
this.state.recordHasError = true;
|
||||
if (this.options.on_skip !== undefined) {
|
||||
try {
|
||||
this.options.on_skip(
|
||||
err,
|
||||
raw ? this.state.rawBuffer.toString(encoding) : undefined,
|
||||
);
|
||||
} catch (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
// this.emit('skip', err, raw ? this.state.rawBuffer.toString(encoding) : undefined);
|
||||
return undefined;
|
||||
} else {
|
||||
return err;
|
||||
}
|
||||
},
|
||||
__infoDataSet: function () {
|
||||
return {
|
||||
...this.info,
|
||||
columns: this.options.columns,
|
||||
};
|
||||
},
|
||||
__infoRecord: function () {
|
||||
const { columns, raw, encoding } = this.options;
|
||||
return {
|
||||
...this.__infoDataSet(),
|
||||
error: this.state.error,
|
||||
header: columns === true,
|
||||
index: this.state.record.length,
|
||||
raw: raw ? this.state.rawBuffer.toString(encoding) : undefined,
|
||||
};
|
||||
},
|
||||
__infoField: function () {
|
||||
const { columns } = this.options;
|
||||
const isColumns = Array.isArray(columns);
|
||||
return {
|
||||
...this.__infoRecord(),
|
||||
column:
|
||||
isColumns === true
|
||||
? columns.length > this.state.record.length
|
||||
? columns[this.state.record.length].name
|
||||
: null
|
||||
: this.state.record.length,
|
||||
quoting: this.state.wasQuoting,
|
||||
};
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
export { transform, CsvError };
|
||||
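For orientation, a minimal usage sketch of the parser assembled above, driven through the public csv-parse sync entry point; the input string and option values are illustrative only, and the result comment assumes the casting behaviour implemented in __cast.

import { parse } from "csv-parse/sync";

const input = "name,qty\nwidget,2\nsprocket,5\n";
const records = parse(input, {
  columns: true,          // first line becomes the column names (__firstLineToColumns)
  skip_empty_lines: true, // handled by the skip_empty_lines branch of parse()
  cast: true,             // numeric fields are converted via __cast / __isFloat
});
// records -> [ { name: "widget", qty: 2 }, { name: "sprocket", qty: 5 } ]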
68
Jira_helper/node_modules/csv-parse/lib/api/init_state.js
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
import ResizeableBuffer from "../utils/ResizeableBuffer.js";
|
||||
|
||||
// white space characters
|
||||
// https://en.wikipedia.org/wiki/Whitespace_character
|
||||
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions/Character_Classes#Types
|
||||
// \f\n\r\t\v\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff
|
||||
const np = 12;
|
||||
const cr = 13; // `\r`, carriage return, 0x0D in hexadecimal, 13 in decimal
const nl = 10; // `\n`, newline, 0x0A in hexadecimal, 10 in decimal
|
||||
const space = 32;
|
||||
const tab = 9;
|
||||
|
||||
const init_state = function (options) {
|
||||
return {
|
||||
bomSkipped: false,
|
||||
bufBytesStart: 0,
|
||||
castField: options.cast_function,
|
||||
commenting: false,
|
||||
// Current error encountered by a record
|
||||
error: undefined,
|
||||
enabled: options.from_line === 1,
|
||||
escaping: false,
|
||||
escapeIsQuote:
|
||||
Buffer.isBuffer(options.escape) &&
|
||||
Buffer.isBuffer(options.quote) &&
|
||||
Buffer.compare(options.escape, options.quote) === 0,
|
||||
// columns can be `false`, `true`, `Array`
|
||||
expectedRecordLength: Array.isArray(options.columns)
|
||||
? options.columns.length
|
||||
: undefined,
|
||||
field: new ResizeableBuffer(20),
|
||||
firstLineToHeaders: options.cast_first_line_to_header,
|
||||
needMoreDataSize: Math.max(
|
||||
// Skip if the remaining buffer smaller than comment
|
||||
options.comment !== null ? options.comment.length : 0,
|
||||
// Skip if the remaining buffer can be delimiter
|
||||
...options.delimiter.map((delimiter) => delimiter.length),
|
||||
// Skip if the remaining buffer can be escape sequence
|
||||
options.quote !== null ? options.quote.length : 0,
|
||||
),
|
||||
previousBuf: undefined,
|
||||
quoting: false,
|
||||
stop: false,
|
||||
rawBuffer: new ResizeableBuffer(100),
|
||||
record: [],
|
||||
recordHasError: false,
|
||||
record_length: 0,
|
||||
recordDelimiterMaxLength:
|
||||
options.record_delimiter.length === 0
|
||||
? 0
|
||||
: Math.max(...options.record_delimiter.map((v) => v.length)),
|
||||
trimChars: [
|
||||
Buffer.from(" ", options.encoding)[0],
|
||||
Buffer.from("\t", options.encoding)[0],
|
||||
],
|
||||
wasQuoting: false,
|
||||
wasRowDelimiter: false,
|
||||
timchars: [
|
||||
Buffer.from(Buffer.from([cr], "utf8").toString(), options.encoding),
|
||||
Buffer.from(Buffer.from([nl], "utf8").toString(), options.encoding),
|
||||
Buffer.from(Buffer.from([np], "utf8").toString(), options.encoding),
|
||||
Buffer.from(Buffer.from([space], "utf8").toString(), options.encoding),
|
||||
Buffer.from(Buffer.from([tab], "utf8").toString(), options.encoding),
|
||||
],
|
||||
};
|
||||
};
|
||||
|
||||
export { init_state };
|
||||
32
Jira_helper/node_modules/csv-parse/lib/api/normalize_columns_array.js
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
import { CsvError } from "./CsvError.js";
|
||||
import { is_object } from "../utils/is_object.js";
|
||||
|
||||
const normalize_columns_array = function (columns) {
|
||||
const normalizedColumns = [];
|
||||
for (let i = 0, l = columns.length; i < l; i++) {
|
||||
const column = columns[i];
|
||||
if (column === undefined || column === null || column === false) {
|
||||
normalizedColumns[i] = { disabled: true };
|
||||
} else if (typeof column === "string") {
|
||||
normalizedColumns[i] = { name: column };
|
||||
} else if (is_object(column)) {
|
||||
if (typeof column.name !== "string") {
|
||||
throw new CsvError("CSV_OPTION_COLUMNS_MISSING_NAME", [
|
||||
"Option columns missing name:",
|
||||
`property "name" is required at position ${i}`,
|
||||
"when column is an object literal",
|
||||
]);
|
||||
}
|
||||
normalizedColumns[i] = column;
|
||||
} else {
|
||||
throw new CsvError("CSV_INVALID_COLUMN_DEFINITION", [
|
||||
"Invalid column definition:",
|
||||
"expect a string or a literal object,",
|
||||
`got ${JSON.stringify(column)} at position ${i}`,
|
||||
]);
|
||||
}
|
||||
}
|
||||
return normalizedColumns;
|
||||
};
|
||||
|
||||
export { normalize_columns_array };
|
||||
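A short illustrative sketch of what the normalization above produces; the input values are made up, and the mapping follows directly from the branches shown:

import { normalize_columns_array } from "./normalize_columns_array.js";

const normalized = normalize_columns_array(["id", null, { name: "label" }]);
// -> [ { name: "id" }, { disabled: true }, { name: "label" } ]
// An object without a string `name` throws CSV_OPTION_COLUMNS_MISSING_NAME;
// any other value (e.g. 42) throws CSV_INVALID_COLUMN_DEFINITION.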
691
Jira_helper/node_modules/csv-parse/lib/api/normalize_options.js
generated
vendored
Normal file
@@ -0,0 +1,691 @@
|
||||
import { normalize_columns_array } from "./normalize_columns_array.js";
|
||||
import { CsvError } from "./CsvError.js";
|
||||
import { underscore } from "../utils/underscore.js";
|
||||
|
||||
const normalize_options = function (opts) {
|
||||
const options = {};
|
||||
// Merge with user options
|
||||
for (const opt in opts) {
|
||||
options[underscore(opt)] = opts[opt];
|
||||
}
|
||||
// Normalize option `encoding`
|
||||
// Note: defined first because other options depend on it
|
||||
// to convert chars/strings into buffers.
|
||||
if (options.encoding === undefined || options.encoding === true) {
|
||||
options.encoding = "utf8";
|
||||
} else if (options.encoding === null || options.encoding === false) {
|
||||
options.encoding = null;
|
||||
} else if (
|
||||
typeof options.encoding !== "string" &&
|
||||
options.encoding !== null
|
||||
) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_ENCODING",
|
||||
[
|
||||
"Invalid option encoding:",
|
||||
"encoding must be a string or null to return a buffer,",
|
||||
`got ${JSON.stringify(options.encoding)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `bom`
|
||||
if (
|
||||
options.bom === undefined ||
|
||||
options.bom === null ||
|
||||
options.bom === false
|
||||
) {
|
||||
options.bom = false;
|
||||
} else if (options.bom !== true) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_BOM",
|
||||
[
|
||||
"Invalid option bom:",
|
||||
"bom must be true,",
|
||||
`got ${JSON.stringify(options.bom)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `cast`
|
||||
options.cast_function = null;
|
||||
if (
|
||||
options.cast === undefined ||
|
||||
options.cast === null ||
|
||||
options.cast === false ||
|
||||
options.cast === ""
|
||||
) {
|
||||
options.cast = undefined;
|
||||
} else if (typeof options.cast === "function") {
|
||||
options.cast_function = options.cast;
|
||||
options.cast = true;
|
||||
} else if (options.cast !== true) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_CAST",
|
||||
[
|
||||
"Invalid option cast:",
|
||||
"cast must be true or a function,",
|
||||
`got ${JSON.stringify(options.cast)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `cast_date`
|
||||
if (
|
||||
options.cast_date === undefined ||
|
||||
options.cast_date === null ||
|
||||
options.cast_date === false ||
|
||||
options.cast_date === ""
|
||||
) {
|
||||
options.cast_date = false;
|
||||
} else if (options.cast_date === true) {
|
||||
options.cast_date = function (value) {
|
||||
const date = Date.parse(value);
|
||||
return !isNaN(date) ? new Date(date) : value;
|
||||
};
|
||||
} else if (typeof options.cast_date !== "function") {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_CAST_DATE",
|
||||
[
|
||||
"Invalid option cast_date:",
|
||||
"cast_date must be true or a function,",
|
||||
`got ${JSON.stringify(options.cast_date)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `columns`
|
||||
options.cast_first_line_to_header = undefined;
|
||||
if (options.columns === true) {
|
||||
// Fields in the first line are converted as-is to columns
|
||||
options.cast_first_line_to_header = undefined;
|
||||
} else if (typeof options.columns === "function") {
|
||||
options.cast_first_line_to_header = options.columns;
|
||||
options.columns = true;
|
||||
} else if (Array.isArray(options.columns)) {
|
||||
options.columns = normalize_columns_array(options.columns);
|
||||
} else if (
|
||||
options.columns === undefined ||
|
||||
options.columns === null ||
|
||||
options.columns === false
|
||||
) {
|
||||
options.columns = false;
|
||||
} else {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_COLUMNS",
|
||||
[
|
||||
"Invalid option columns:",
|
||||
"expect an array, a function or true,",
|
||||
`got ${JSON.stringify(options.columns)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `group_columns_by_name`
|
||||
if (
|
||||
options.group_columns_by_name === undefined ||
|
||||
options.group_columns_by_name === null ||
|
||||
options.group_columns_by_name === false
|
||||
) {
|
||||
options.group_columns_by_name = false;
|
||||
} else if (options.group_columns_by_name !== true) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_GROUP_COLUMNS_BY_NAME",
|
||||
[
|
||||
"Invalid option group_columns_by_name:",
|
||||
"expect an boolean,",
|
||||
`got ${JSON.stringify(options.group_columns_by_name)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
} else if (options.columns === false) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_GROUP_COLUMNS_BY_NAME",
|
||||
[
|
||||
"Invalid option group_columns_by_name:",
|
||||
"the `columns` mode must be activated.",
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `comment`
|
||||
if (
|
||||
options.comment === undefined ||
|
||||
options.comment === null ||
|
||||
options.comment === false ||
|
||||
options.comment === ""
|
||||
) {
|
||||
options.comment = null;
|
||||
} else {
|
||||
if (typeof options.comment === "string") {
|
||||
options.comment = Buffer.from(options.comment, options.encoding);
|
||||
}
|
||||
if (!Buffer.isBuffer(options.comment)) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_COMMENT",
|
||||
[
|
||||
"Invalid option comment:",
|
||||
"comment must be a buffer or a string,",
|
||||
`got ${JSON.stringify(options.comment)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize option `comment_no_infix`
|
||||
if (
|
||||
options.comment_no_infix === undefined ||
|
||||
options.comment_no_infix === null ||
|
||||
options.comment_no_infix === false
|
||||
) {
|
||||
options.comment_no_infix = false;
|
||||
} else if (options.comment_no_infix !== true) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_COMMENT",
|
||||
[
|
||||
"Invalid option comment_no_infix:",
|
||||
"value must be a boolean,",
|
||||
`got ${JSON.stringify(options.comment_no_infix)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `delimiter`
|
||||
const delimiter_json = JSON.stringify(options.delimiter);
|
||||
if (!Array.isArray(options.delimiter))
|
||||
options.delimiter = [options.delimiter];
|
||||
if (options.delimiter.length === 0) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_DELIMITER",
|
||||
[
|
||||
"Invalid option delimiter:",
|
||||
"delimiter must be a non empty string or buffer or array of string|buffer,",
|
||||
`got ${delimiter_json}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
options.delimiter = options.delimiter.map(function (delimiter) {
|
||||
if (delimiter === undefined || delimiter === null || delimiter === false) {
|
||||
return Buffer.from(",", options.encoding);
|
||||
}
|
||||
if (typeof delimiter === "string") {
|
||||
delimiter = Buffer.from(delimiter, options.encoding);
|
||||
}
|
||||
if (!Buffer.isBuffer(delimiter) || delimiter.length === 0) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_DELIMITER",
|
||||
[
|
||||
"Invalid option delimiter:",
|
||||
"delimiter must be a non empty string or buffer or array of string|buffer,",
|
||||
`got ${delimiter_json}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
return delimiter;
|
||||
});
|
||||
// Normalize option `escape`
|
||||
if (options.escape === undefined || options.escape === true) {
|
||||
options.escape = Buffer.from('"', options.encoding);
|
||||
} else if (typeof options.escape === "string") {
|
||||
options.escape = Buffer.from(options.escape, options.encoding);
|
||||
} else if (options.escape === null || options.escape === false) {
|
||||
options.escape = null;
|
||||
}
|
||||
if (options.escape !== null) {
|
||||
if (!Buffer.isBuffer(options.escape)) {
|
||||
throw new Error(
|
||||
`Invalid Option: escape must be a buffer, a string or a boolean, got ${JSON.stringify(options.escape)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize option `from`
|
||||
if (options.from === undefined || options.from === null) {
|
||||
options.from = 1;
|
||||
} else {
|
||||
if (typeof options.from === "string" && /\d+/.test(options.from)) {
|
||||
options.from = parseInt(options.from);
|
||||
}
|
||||
if (Number.isInteger(options.from)) {
|
||||
if (options.from < 0) {
|
||||
throw new Error(
|
||||
`Invalid Option: from must be a positive integer, got ${JSON.stringify(opts.from)}`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: from must be an integer, got ${JSON.stringify(options.from)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize option `from_line`
|
||||
if (options.from_line === undefined || options.from_line === null) {
|
||||
options.from_line = 1;
|
||||
} else {
|
||||
if (
|
||||
typeof options.from_line === "string" &&
|
||||
/\d+/.test(options.from_line)
|
||||
) {
|
||||
options.from_line = parseInt(options.from_line);
|
||||
}
|
||||
if (Number.isInteger(options.from_line)) {
|
||||
if (options.from_line <= 0) {
|
||||
throw new Error(
|
||||
`Invalid Option: from_line must be a positive integer greater than 0, got ${JSON.stringify(opts.from_line)}`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: from_line must be an integer, got ${JSON.stringify(opts.from_line)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize options `ignore_last_delimiters`
|
||||
if (
|
||||
options.ignore_last_delimiters === undefined ||
|
||||
options.ignore_last_delimiters === null
|
||||
) {
|
||||
options.ignore_last_delimiters = false;
|
||||
} else if (typeof options.ignore_last_delimiters === "number") {
|
||||
options.ignore_last_delimiters = Math.floor(options.ignore_last_delimiters);
|
||||
if (options.ignore_last_delimiters === 0) {
|
||||
options.ignore_last_delimiters = false;
|
||||
}
|
||||
} else if (typeof options.ignore_last_delimiters !== "boolean") {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_IGNORE_LAST_DELIMITERS",
|
||||
[
|
||||
"Invalid option `ignore_last_delimiters`:",
|
||||
"the value must be a boolean value or an integer,",
|
||||
`got ${JSON.stringify(options.ignore_last_delimiters)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
if (options.ignore_last_delimiters === true && options.columns === false) {
|
||||
throw new CsvError(
|
||||
"CSV_IGNORE_LAST_DELIMITERS_REQUIRES_COLUMNS",
|
||||
[
|
||||
"The option `ignore_last_delimiters`",
|
||||
"requires the activation of the `columns` option",
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `info`
|
||||
if (
|
||||
options.info === undefined ||
|
||||
options.info === null ||
|
||||
options.info === false
|
||||
) {
|
||||
options.info = false;
|
||||
} else if (options.info !== true) {
|
||||
throw new Error(
|
||||
`Invalid Option: info must be true, got ${JSON.stringify(options.info)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `max_record_size`
|
||||
if (
|
||||
options.max_record_size === undefined ||
|
||||
options.max_record_size === null ||
|
||||
options.max_record_size === false
|
||||
) {
|
||||
options.max_record_size = 0;
|
||||
} else if (
|
||||
Number.isInteger(options.max_record_size) &&
|
||||
options.max_record_size >= 0
|
||||
) {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
typeof options.max_record_size === "string" &&
|
||||
/\d+/.test(options.max_record_size)
|
||||
) {
|
||||
options.max_record_size = parseInt(options.max_record_size);
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: max_record_size must be a positive integer, got ${JSON.stringify(options.max_record_size)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `objname`
|
||||
if (
|
||||
options.objname === undefined ||
|
||||
options.objname === null ||
|
||||
options.objname === false
|
||||
) {
|
||||
options.objname = undefined;
|
||||
} else if (Buffer.isBuffer(options.objname)) {
|
||||
if (options.objname.length === 0) {
|
||||
throw new Error(`Invalid Option: objname must be a non empty buffer`);
|
||||
}
|
||||
if (options.encoding === null) {
|
||||
// Don't call `toString`, leave objname as a buffer
|
||||
} else {
|
||||
options.objname = options.objname.toString(options.encoding);
|
||||
}
|
||||
} else if (typeof options.objname === "string") {
|
||||
if (options.objname.length === 0) {
|
||||
throw new Error(`Invalid Option: objname must be a non empty string`);
|
||||
}
|
||||
// Great, nothing to do
|
||||
} else if (typeof options.objname === "number") {
|
||||
// if(options.objname.length === 0){
|
||||
// throw new Error(`Invalid Option: objname must be a non empty string`);
|
||||
// }
|
||||
// Great, nothing to do
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: objname must be a string or a buffer, got ${options.objname}`,
|
||||
);
|
||||
}
|
||||
if (options.objname !== undefined) {
|
||||
if (typeof options.objname === "number") {
|
||||
if (options.columns !== false) {
|
||||
throw Error(
|
||||
"Invalid Option: objname index cannot be combined with columns or be defined as a field",
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// A string or a buffer
|
||||
if (options.columns === false) {
|
||||
throw Error(
|
||||
"Invalid Option: objname field must be combined with columns or be defined as an index",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Normalize option `on_record`
|
||||
if (options.on_record === undefined || options.on_record === null) {
|
||||
options.on_record = undefined;
|
||||
} else if (typeof options.on_record !== "function") {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_ON_RECORD",
|
||||
[
|
||||
"Invalid option `on_record`:",
|
||||
"expect a function,",
|
||||
`got ${JSON.stringify(options.on_record)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
// Normalize option `on_skip`
|
||||
// options.on_skip ??= (err, chunk) => {
|
||||
// this.emit('skip', err, chunk);
|
||||
// };
|
||||
if (
|
||||
options.on_skip !== undefined &&
|
||||
options.on_skip !== null &&
|
||||
typeof options.on_skip !== "function"
|
||||
) {
|
||||
throw new Error(
|
||||
`Invalid Option: on_skip must be a function, got ${JSON.stringify(options.on_skip)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `quote`
|
||||
if (
|
||||
options.quote === null ||
|
||||
options.quote === false ||
|
||||
options.quote === ""
|
||||
) {
|
||||
options.quote = null;
|
||||
} else {
|
||||
if (options.quote === undefined || options.quote === true) {
|
||||
options.quote = Buffer.from('"', options.encoding);
|
||||
} else if (typeof options.quote === "string") {
|
||||
options.quote = Buffer.from(options.quote, options.encoding);
|
||||
}
|
||||
if (!Buffer.isBuffer(options.quote)) {
|
||||
throw new Error(
|
||||
`Invalid Option: quote must be a buffer or a string, got ${JSON.stringify(options.quote)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize option `raw`
|
||||
if (
|
||||
options.raw === undefined ||
|
||||
options.raw === null ||
|
||||
options.raw === false
|
||||
) {
|
||||
options.raw = false;
|
||||
} else if (options.raw !== true) {
|
||||
throw new Error(
|
||||
`Invalid Option: raw must be true, got ${JSON.stringify(options.raw)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `record_delimiter`
|
||||
if (options.record_delimiter === undefined) {
|
||||
options.record_delimiter = [];
|
||||
} else if (
|
||||
typeof options.record_delimiter === "string" ||
|
||||
Buffer.isBuffer(options.record_delimiter)
|
||||
) {
|
||||
if (options.record_delimiter.length === 0) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_RECORD_DELIMITER",
|
||||
[
|
||||
"Invalid option `record_delimiter`:",
|
||||
"value must be a non empty string or buffer,",
|
||||
`got ${JSON.stringify(options.record_delimiter)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
options.record_delimiter = [options.record_delimiter];
|
||||
} else if (!Array.isArray(options.record_delimiter)) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_RECORD_DELIMITER",
|
||||
[
|
||||
"Invalid option `record_delimiter`:",
|
||||
"value must be a string, a buffer or array of string|buffer,",
|
||||
`got ${JSON.stringify(options.record_delimiter)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
options.record_delimiter = options.record_delimiter.map(function (rd, i) {
|
||||
if (typeof rd !== "string" && !Buffer.isBuffer(rd)) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_RECORD_DELIMITER",
|
||||
[
|
||||
"Invalid option `record_delimiter`:",
|
||||
"value must be a string, a buffer or array of string|buffer",
|
||||
`at index ${i},`,
|
||||
`got ${JSON.stringify(rd)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
} else if (rd.length === 0) {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_OPTION_RECORD_DELIMITER",
|
||||
[
|
||||
"Invalid option `record_delimiter`:",
|
||||
"value must be a non empty string or buffer",
|
||||
`at index ${i},`,
|
||||
`got ${JSON.stringify(rd)}`,
|
||||
],
|
||||
options,
|
||||
);
|
||||
}
|
||||
if (typeof rd === "string") {
|
||||
rd = Buffer.from(rd, options.encoding);
|
||||
}
|
||||
return rd;
|
||||
});
|
||||
// Normalize option `relax_column_count`
|
||||
if (typeof options.relax_column_count === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.relax_column_count === undefined ||
|
||||
options.relax_column_count === null
|
||||
) {
|
||||
options.relax_column_count = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: relax_column_count must be a boolean, got ${JSON.stringify(options.relax_column_count)}`,
|
||||
);
|
||||
}
|
||||
if (typeof options.relax_column_count_less === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.relax_column_count_less === undefined ||
|
||||
options.relax_column_count_less === null
|
||||
) {
|
||||
options.relax_column_count_less = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: relax_column_count_less must be a boolean, got ${JSON.stringify(options.relax_column_count_less)}`,
|
||||
);
|
||||
}
|
||||
if (typeof options.relax_column_count_more === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.relax_column_count_more === undefined ||
|
||||
options.relax_column_count_more === null
|
||||
) {
|
||||
options.relax_column_count_more = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: relax_column_count_more must be a boolean, got ${JSON.stringify(options.relax_column_count_more)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `relax_quotes`
|
||||
if (typeof options.relax_quotes === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.relax_quotes === undefined ||
|
||||
options.relax_quotes === null
|
||||
) {
|
||||
options.relax_quotes = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: relax_quotes must be a boolean, got ${JSON.stringify(options.relax_quotes)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `skip_empty_lines`
|
||||
if (typeof options.skip_empty_lines === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.skip_empty_lines === undefined ||
|
||||
options.skip_empty_lines === null
|
||||
) {
|
||||
options.skip_empty_lines = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: skip_empty_lines must be a boolean, got ${JSON.stringify(options.skip_empty_lines)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `skip_records_with_empty_values`
|
||||
if (typeof options.skip_records_with_empty_values === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.skip_records_with_empty_values === undefined ||
|
||||
options.skip_records_with_empty_values === null
|
||||
) {
|
||||
options.skip_records_with_empty_values = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: skip_records_with_empty_values must be a boolean, got ${JSON.stringify(options.skip_records_with_empty_values)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `skip_records_with_error`
|
||||
if (typeof options.skip_records_with_error === "boolean") {
|
||||
// Great, nothing to do
|
||||
} else if (
|
||||
options.skip_records_with_error === undefined ||
|
||||
options.skip_records_with_error === null
|
||||
) {
|
||||
options.skip_records_with_error = false;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: skip_records_with_error must be a boolean, got ${JSON.stringify(options.skip_records_with_error)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `rtrim`
|
||||
if (
|
||||
options.rtrim === undefined ||
|
||||
options.rtrim === null ||
|
||||
options.rtrim === false
|
||||
) {
|
||||
options.rtrim = false;
|
||||
} else if (options.rtrim !== true) {
|
||||
throw new Error(
|
||||
`Invalid Option: rtrim must be a boolean, got ${JSON.stringify(options.rtrim)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `ltrim`
|
||||
if (
|
||||
options.ltrim === undefined ||
|
||||
options.ltrim === null ||
|
||||
options.ltrim === false
|
||||
) {
|
||||
options.ltrim = false;
|
||||
} else if (options.ltrim !== true) {
|
||||
throw new Error(
|
||||
`Invalid Option: ltrim must be a boolean, got ${JSON.stringify(options.ltrim)}`,
|
||||
);
|
||||
}
|
||||
// Normalize option `trim`
|
||||
if (
|
||||
options.trim === undefined ||
|
||||
options.trim === null ||
|
||||
options.trim === false
|
||||
) {
|
||||
options.trim = false;
|
||||
} else if (options.trim !== true) {
|
||||
throw new Error(
|
||||
`Invalid Option: trim must be a boolean, got ${JSON.stringify(options.trim)}`,
|
||||
);
|
||||
}
|
||||
// Normalize options `trim`, `ltrim` and `rtrim`
|
||||
if (options.trim === true && opts.ltrim !== false) {
|
||||
options.ltrim = true;
|
||||
} else if (options.ltrim !== true) {
|
||||
options.ltrim = false;
|
||||
}
|
||||
if (options.trim === true && opts.rtrim !== false) {
|
||||
options.rtrim = true;
|
||||
} else if (options.rtrim !== true) {
|
||||
options.rtrim = false;
|
||||
}
|
||||
// Normalize option `to`
|
||||
if (options.to === undefined || options.to === null) {
|
||||
options.to = -1;
|
||||
} else if (options.to !== -1) {
|
||||
if (typeof options.to === "string" && /\d+/.test(options.to)) {
|
||||
options.to = parseInt(options.to);
|
||||
}
|
||||
if (Number.isInteger(options.to)) {
|
||||
if (options.to <= 0) {
|
||||
throw new Error(
|
||||
`Invalid Option: to must be a positive integer greater than 0, got ${JSON.stringify(opts.to)}`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: to must be an integer, got ${JSON.stringify(opts.to)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
// Normalize option `to_line`
|
||||
if (options.to_line === undefined || options.to_line === null) {
|
||||
options.to_line = -1;
|
||||
} else if (options.to_line !== -1) {
|
||||
if (typeof options.to_line === "string" && /\d+/.test(options.to_line)) {
|
||||
options.to_line = parseInt(options.to_line);
|
||||
}
|
||||
if (Number.isInteger(options.to_line)) {
|
||||
if (options.to_line <= 0) {
|
||||
throw new Error(
|
||||
`Invalid Option: to_line must be a positive integer greater than 0, got ${JSON.stringify(opts.to_line)}`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid Option: to_line must be an integer, got ${JSON.stringify(opts.to_line)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
return options;
|
||||
};
|
||||
|
||||
export { normalize_options };
|
||||
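A brief illustrative sketch of the trim-related and default normalization above; the calls and resulting values are examples, not an exhaustive description:

import { normalize_options } from "./normalize_options.js";

normalize_options({ trim: true });
// -> ltrim: true, rtrim: true (trim expands to both sides)
normalize_options({ trim: true, ltrim: false });
// -> ltrim: false, rtrim: true (an explicit false is respected)
normalize_options({});
// -> ltrim: false, rtrim: false, bom: false, encoding: "utf8",
//    delimiter: [Buffer.from(",")], record_delimiter: []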
507
Jira_helper/node_modules/csv-parse/lib/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,507 @@
|
||||
// Original definitions in https://github.com/DefinitelyTyped/DefinitelyTyped by: David Muller <https://github.com/davidm77>
|
||||
|
||||
/// <reference types="node" />
|
||||
|
||||
import * as stream from "stream";
|
||||
|
||||
export type Callback<T = string[]> = (
|
||||
err: CsvError | undefined,
|
||||
records: T[],
|
||||
info?: Info,
|
||||
) => void;
|
||||
|
||||
// export interface Parser extends stream.Transform {}
|
||||
|
||||
// export class Parser<T> extends stream.Transform {
|
||||
export class Parser extends stream.Transform {
|
||||
constructor(options: Options);
|
||||
|
||||
// __push(line: T): CsvError | undefined;
|
||||
__push(line: any): CsvError | undefined;
|
||||
|
||||
// __write(chars: any, end: any, callback: any): any;
|
||||
|
||||
readonly options: OptionsNormalized;
|
||||
|
||||
readonly info: Info;
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
/**
|
||||
* Count the number of lines being fully commented.
|
||||
*/
|
||||
readonly comment_lines: number;
|
||||
/**
|
||||
* Count the number of processed empty lines.
|
||||
*/
|
||||
readonly empty_lines: number;
|
||||
/**
|
||||
* The number of lines encountered in the source dataset, starting at 1 for the first line.
|
||||
*/
|
||||
readonly lines: number;
|
||||
/**
|
||||
* Count the number of processed records.
|
||||
*/
|
||||
readonly records: number;
|
||||
/**
|
||||
* Count of the number of processed bytes.
|
||||
*/
|
||||
readonly bytes: number;
|
||||
/**
|
||||
* Number of non uniform records when `relax_column_count` is true.
|
||||
*/
|
||||
readonly invalid_field_length: number;
|
||||
/**
|
||||
* Normalized version of `options.columns` when `options.columns` is true, boolean otherwise.
|
||||
*/
|
||||
readonly columns: boolean | { name: string }[] | { disabled: true }[];
|
||||
}
|
||||
|
||||
export interface CastingContext {
|
||||
readonly column: number | string;
|
||||
readonly empty_lines: number;
|
||||
readonly error: CsvError;
|
||||
readonly header: boolean;
|
||||
readonly index: number;
|
||||
readonly quoting: boolean;
|
||||
readonly lines: number;
|
||||
readonly raw: string | undefined;
|
||||
readonly records: number;
|
||||
readonly invalid_field_length: number;
|
||||
}
|
||||
|
||||
export type CastingFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => unknown;
|
||||
|
||||
export type CastingDateFunction = (
|
||||
value: string,
|
||||
context: CastingContext,
|
||||
) => Date;
|
||||
|
||||
export type ColumnOption<K = string> =
|
||||
| K
|
||||
| undefined
|
||||
| null
|
||||
| false
|
||||
| { name: K };
|
||||
|
||||
export interface OptionsNormalized<T = string[]> {
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, it receives the value as its first argument and a context as its second argument, and returns a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, it receives the value as its argument and returns a new value. It requires the "auto_parse" option. Be careful: it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* Internal property string the function to
|
||||
*/
|
||||
cast_first_line_to_header?: (
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* List of fields as an array, a user defined callback accepting the first
|
||||
* line and returning the column names or true if autodiscovered in the first
|
||||
* CSV line, default to null, affect the result data set in the sense that
|
||||
* records will be objects instead of arrays.
|
||||
*/
|
||||
columns:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[];
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment: string | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix: boolean;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter: Buffer[];
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding: BufferEncoding | null;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape: null | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from: number;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line: number;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns; the option requires the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim: boolean;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname: number | string | undefined;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less: boolean;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more: boolean;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes: boolean;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter: Buffer[];
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim: boolean;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines: boolean;
|
||||
/**
|
||||
* Skip a line when an error is found inside it and go directly to processing the next line.
|
||||
*/
|
||||
skip_records_with_error: boolean;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values: boolean;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to: number;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line: number;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim: boolean;
|
||||
}
|
||||
|
||||
/*
|
||||
Note, could not `extends stream.TransformOptions` because encoding can be
|
||||
BufferEncoding and undefined as well as null which is not defined in the
|
||||
extended type.
|
||||
*/
|
||||
export interface Options<T = string[]> {
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to native types.
|
||||
* @deprecated Use {@link cast}
|
||||
*/
|
||||
auto_parse?: boolean | CastingFunction;
|
||||
autoParse?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert read data types to dates. It requires the "auto_parse" option.
|
||||
* @deprecated Use {@link cast_date}
|
||||
*/
|
||||
auto_parse_date?: boolean | CastingDateFunction;
|
||||
autoParseDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* If true, detect and exclude the byte order mark (BOM) from the CSV input if present.
|
||||
*/
|
||||
bom?: boolean;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to native types.
|
||||
* If a function, it receives the value as its first argument and a context as its second argument, and returns a new value. More information about the context properties is available below.
|
||||
*/
|
||||
cast?: boolean | CastingFunction;
|
||||
/**
|
||||
* If true, the parser will attempt to convert input string to dates.
|
||||
* If a function, it receives the value as its argument and returns a new value. It requires the "auto_parse" option. Be careful: it relies on Date.parse.
|
||||
*/
|
||||
cast_date?: boolean | CastingDateFunction;
|
||||
castDate?: boolean | CastingDateFunction;
|
||||
/**
|
||||
* List of fields as an array,
|
||||
* a user defined callback accepting the first line and returning the column names or true if autodiscovered in the first CSV line,
|
||||
* default to null,
|
||||
* affect the result data set in the sense that records will be objects instead of arrays.
|
||||
*/
|
||||
columns?:
|
||||
| boolean
|
||||
| ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]
|
||||
| ((
|
||||
record: T,
|
||||
) => ColumnOption<
|
||||
T extends string[] ? string : T extends unknown ? string : keyof T
|
||||
>[]);
|
||||
/**
|
||||
* Convert values into an array of values when columns are activated and
|
||||
* when multiple columns of the same name are found.
|
||||
*/
|
||||
group_columns_by_name?: boolean;
|
||||
groupColumnsByName?: boolean;
|
||||
/**
|
||||
* Treat all the characters after this one as a comment, default to '' (disabled).
|
||||
*/
|
||||
comment?: string | boolean | null;
|
||||
/**
|
||||
* Restrict the definition of comments to a full line. Comment characters
|
||||
* defined in the middle of the line are not interpreted as such. The
|
||||
* option requires the activation of comments.
|
||||
*/
|
||||
comment_no_infix?: boolean | null;
|
||||
/**
|
||||
* Set the field delimiter. One character only, defaults to comma.
|
||||
*/
|
||||
delimiter?: string | string[] | Buffer;
|
||||
/**
|
||||
* Set the source and destination encoding, a value of `null` returns buffer instead of strings.
|
||||
*/
|
||||
encoding?: BufferEncoding | boolean | null | undefined;
|
||||
/**
|
||||
* Set the escape character, one character only, defaults to double quotes.
|
||||
*/
|
||||
escape?: string | null | boolean | Buffer;
|
||||
/**
|
||||
* Start handling records from the requested number of records.
|
||||
*/
|
||||
from?: number | string;
|
||||
/**
|
||||
* Start handling records from the requested line number.
|
||||
*/
|
||||
from_line?: null | number | string;
|
||||
fromLine?: null | number | string;
|
||||
/**
|
||||
* Don't interpret delimiters as such in the last field according to the number of fields calculated from the number of columns; the option requires the presence of the `column` option when `true`.
|
||||
*/
|
||||
ignore_last_delimiters?: boolean | number;
|
||||
/**
|
||||
* Generate two properties `info` and `record` where `info` is a snapshot of the info object at the time the record was created and `record` is the parsed array or object.
|
||||
*/
|
||||
info?: boolean;
|
||||
/**
|
||||
* If true, ignore whitespace immediately following the delimiter (i.e. left-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
ltrim?: boolean | null;
|
||||
/**
|
||||
* Maximum number of characters to be contained in the field and line buffers before an exception is raised,
|
||||
* used to guard against a wrong delimiter or record_delimiter,
|
||||
* default to 128000 characters.
|
||||
*/
|
||||
max_record_size?: number | null | string;
|
||||
maxRecordSize?: number;
|
||||
/**
|
||||
* Name of header-record title to name objects by.
|
||||
*/
|
||||
objname?: Buffer | null | number | string;
|
||||
/**
|
||||
* Alter and filter records by executing a user defined function.
|
||||
*/
|
||||
on_record?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
onRecord?: (record: T, context: CastingContext) => T | null | undefined;
|
||||
/**
|
||||
* Function called when an error occurred if the `skip_records_with_error`
|
||||
* option is activated.
|
||||
*/
|
||||
on_skip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
onSkip?: (err: CsvError | undefined, raw: string | undefined) => undefined;
|
||||
/**
|
||||
* Optional character surrounding a field, one character only, defaults to double quotes.
|
||||
*/
|
||||
quote?: string | boolean | Buffer | null;
|
||||
/**
|
||||
* Generate two properties raw and row where raw is the original CSV row content and row is the parsed array or object.
|
||||
*/
|
||||
raw?: boolean | null;
|
||||
/**
|
||||
* One or multiple characters used to delimit record rows; defaults to auto discovery if not provided.
|
||||
* Supported auto discovery methods are Linux ("\n"), Apple ("\r") and Windows ("\r\n") row delimiters.
|
||||
*/
|
||||
record_delimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
recordDelimiter?: string | Buffer | null | (string | Buffer | null)[];
|
||||
/**
|
||||
* Discard inconsistent columns count, default to false.
|
||||
*/
|
||||
relax_column_count?: boolean | null;
|
||||
relaxColumnCount?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains less fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_less?: boolean | null;
|
||||
relaxColumnCountLess?: boolean | null;
|
||||
/**
|
||||
* Discard inconsistent columns count when the record contains more fields than expected, default to false.
|
||||
*/
|
||||
relax_column_count_more?: boolean | null;
|
||||
relaxColumnCountMore?: boolean | null;
|
||||
/**
|
||||
* Preserve quotes inside unquoted field.
|
||||
*/
|
||||
relax_quotes?: boolean | null;
|
||||
relaxQuotes?: boolean | null;
|
||||
/**
|
||||
* If true, ignore whitespace immediately preceding the delimiter (i.e. right-trim all fields), defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
rtrim?: boolean | null;
|
||||
/**
|
||||
* Don't generate empty values for empty lines.
|
||||
* Defaults to false
|
||||
*/
|
||||
skip_empty_lines?: boolean | null;
|
||||
skipEmptyLines?: boolean | null;
|
||||
/**
|
||||
* Don't generate records for lines containing empty column values (column matching /\s*/), defaults to false.
|
||||
*/
|
||||
skip_records_with_empty_values?: boolean | null;
|
||||
skipRecordsWithEmptyValues?: boolean | null;
|
||||
/**
|
||||
* Skip a line when an error is found inside it and go directly to processing the next line.
|
||||
*/
|
||||
skip_records_with_error?: boolean | null;
|
||||
skipRecordsWithError?: boolean | null;
|
||||
/**
|
||||
* Stop handling records after the requested number of records.
|
||||
*/
|
||||
to?: null | number | string;
|
||||
/**
|
||||
* Stop handling records after the requested line number.
|
||||
*/
|
||||
to_line?: null | number | string;
|
||||
toLine?: null | number | string;
|
||||
/**
|
||||
* If true, ignore whitespace immediately around the delimiter, defaults to false.
|
||||
* Does not remove whitespace in a quoted field.
|
||||
*/
|
||||
trim?: boolean | null;
|
||||
}
|
||||
|
||||
export type CsvErrorCode =
|
||||
| "CSV_INVALID_ARGUMENT"
|
||||
| "CSV_INVALID_CLOSING_QUOTE"
|
||||
| "CSV_INVALID_COLUMN_DEFINITION"
|
||||
| "CSV_INVALID_COLUMN_MAPPING"
|
||||
| "CSV_INVALID_OPTION_BOM"
|
||||
| "CSV_INVALID_OPTION_CAST"
|
||||
| "CSV_INVALID_OPTION_CAST_DATE"
|
||||
| "CSV_INVALID_OPTION_COLUMNS"
|
||||
| "CSV_INVALID_OPTION_COMMENT"
|
||||
| "CSV_INVALID_OPTION_DELIMITER"
|
||||
| "CSV_INVALID_OPTION_GROUP_COLUMNS_BY_NAME"
|
||||
| "CSV_INVALID_OPTION_ON_RECORD"
|
||||
| "CSV_MAX_RECORD_SIZE"
|
||||
| "CSV_NON_TRIMABLE_CHAR_AFTER_CLOSING_QUOTE"
|
||||
| "CSV_OPTION_COLUMNS_MISSING_NAME"
|
||||
| "CSV_QUOTE_NOT_CLOSED"
|
||||
| "CSV_RECORD_INCONSISTENT_FIELDS_LENGTH"
|
||||
| "CSV_RECORD_INCONSISTENT_COLUMNS"
|
||||
| "CSV_UNKNOWN_ERROR"
|
||||
| "INVALID_OPENING_QUOTE";
|
||||
|
||||
export class CsvError extends Error {
|
||||
readonly code: CsvErrorCode;
|
||||
[key: string]: any;
|
||||
|
||||
constructor(
|
||||
code: CsvErrorCode,
|
||||
message: string | string[],
|
||||
options?: OptionsNormalized,
|
||||
...contexts: unknown[]
|
||||
);
|
||||
}
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
options: Options,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
options: OptionsWithColumns<T>,
|
||||
callback?: Callback<T>,
|
||||
): Parser;
|
||||
declare function parse(options: Options, callback?: Callback): Parser;
|
||||
|
||||
declare function parse(
|
||||
input: string | Buffer | Uint8Array,
|
||||
callback?: Callback,
|
||||
): Parser;
|
||||
declare function parse(callback?: Callback): Parser;
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
declare function normalize_options(opts: Options): OptionsNormalized;
|
||||
export { normalize_options };
|
||||
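The definitions above describe the callback form of the API: `parse()` takes an optional input, an options object, and a callback receiving `(err, records, info)`, and `columns` controls whether records come back as arrays or objects. A minimal usage sketch against these typings — the two-column input and its field names are invented for illustration:

```ts
import { parse } from "csv-parse";

// Hypothetical input; with `columns: true` the header row names the record fields.
const input = "id,summary\n1,Fix login redirect\n2,Update release notes\n";

parse(input, { columns: true, trim: true }, (err, records) => {
  if (err) throw err;
  console.log(records); // [ { id: '1', summary: 'Fix login redirect' }, ... ]
});
```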
138
Jira_helper/node_modules/csv-parse/lib/index.js
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
/*
|
||||
CSV Parse
|
||||
|
||||
Please look at the [project documentation](https://csv.js.org/parse/) for
|
||||
additional information.
|
||||
*/
|
||||
|
||||
import { Transform } from "stream";
|
||||
import { is_object } from "./utils/is_object.js";
|
||||
import { transform } from "./api/index.js";
|
||||
import { CsvError } from "./api/CsvError.js";
|
||||
import { normalize_options } from "./api/normalize_options.js";
|
||||
|
||||
class Parser extends Transform {
|
||||
constructor(opts = {}) {
|
||||
super({ ...{ readableObjectMode: true }, ...opts, encoding: null });
|
||||
this.api = transform({
|
||||
on_skip: (err, chunk) => {
|
||||
this.emit("skip", err, chunk);
|
||||
},
|
||||
...opts,
|
||||
});
|
||||
// Backward compatibility
|
||||
this.state = this.api.state;
|
||||
this.options = this.api.options;
|
||||
this.info = this.api.info;
|
||||
}
|
||||
// Implementation of `Transform._transform`
|
||||
_transform(buf, _, callback) {
|
||||
if (this.state.stop === true) {
|
||||
return;
|
||||
}
|
||||
const err = this.api.parse(
|
||||
buf,
|
||||
false,
|
||||
(record) => {
|
||||
this.push(record);
|
||||
},
|
||||
() => {
|
||||
this.push(null);
|
||||
this.end();
|
||||
// Fix #333 and break #410
|
||||
// ko: api.stream.iterator.coffee
|
||||
// ko with v21.4.0, ok with node v20.5.1: api.stream.finished # aborted (with generate())
|
||||
// ko: api.stream.finished # aborted (with Readable)
|
||||
// this.destroy()
|
||||
// Fix #410 and partially break #333
|
||||
// ok: api.stream.iterator.coffee
|
||||
// ok: api.stream.finished # aborted (with generate())
|
||||
// broken: api.stream.finished # aborted (with Readable)
|
||||
this.on("end", this.destroy);
|
||||
},
|
||||
);
|
||||
if (err !== undefined) {
|
||||
this.state.stop = true;
|
||||
}
|
||||
callback(err);
|
||||
}
|
||||
// Implementation of `Transform._flush`
|
||||
_flush(callback) {
|
||||
if (this.state.stop === true) {
|
||||
return;
|
||||
}
|
||||
const err = this.api.parse(
|
||||
undefined,
|
||||
true,
|
||||
(record) => {
|
||||
this.push(record);
|
||||
},
|
||||
() => {
|
||||
this.push(null);
|
||||
this.on("end", this.destroy);
|
||||
},
|
||||
);
|
||||
callback(err);
|
||||
}
|
||||
}
|
||||
|
||||
const parse = function () {
|
||||
let data, options, callback;
|
||||
for (const i in arguments) {
|
||||
const argument = arguments[i];
|
||||
const type = typeof argument;
|
||||
if (
|
||||
data === undefined &&
|
||||
(typeof argument === "string" || Buffer.isBuffer(argument))
|
||||
) {
|
||||
data = argument;
|
||||
} else if (options === undefined && is_object(argument)) {
|
||||
options = argument;
|
||||
} else if (callback === undefined && type === "function") {
|
||||
callback = argument;
|
||||
} else {
|
||||
throw new CsvError(
|
||||
"CSV_INVALID_ARGUMENT",
|
||||
["Invalid argument:", `got ${JSON.stringify(argument)} at index ${i}`],
|
||||
options || {},
|
||||
);
|
||||
}
|
||||
}
|
||||
const parser = new Parser(options);
|
||||
if (callback) {
|
||||
const records =
|
||||
options === undefined || options.objname === undefined ? [] : {};
|
||||
parser.on("readable", function () {
|
||||
let record;
|
||||
while ((record = this.read()) !== null) {
|
||||
if (options === undefined || options.objname === undefined) {
|
||||
records.push(record);
|
||||
} else {
|
||||
records[record[0]] = record[1];
|
||||
}
|
||||
}
|
||||
});
|
||||
parser.on("error", function (err) {
|
||||
callback(err, undefined, parser.api.__infoDataSet());
|
||||
});
|
||||
parser.on("end", function () {
|
||||
callback(undefined, records, parser.api.__infoDataSet());
|
||||
});
|
||||
}
|
||||
if (data !== undefined) {
|
||||
const writer = function () {
|
||||
parser.write(data);
|
||||
parser.end();
|
||||
};
|
||||
// Support Deno; Rollup doesn't provide a shim for setImmediate
|
||||
if (typeof setImmediate === "function") {
|
||||
setImmediate(writer);
|
||||
} else {
|
||||
setTimeout(writer, 0);
|
||||
}
|
||||
}
|
||||
return parser;
|
||||
};
|
||||
|
||||
// export default parse
|
||||
export { parse, Parser, CsvError, normalize_options };
|
||||
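Because `Parser` extends `stream.Transform`, the same entry point can also be driven as a plain Node stream instead of through the callback wrapper defined at the bottom of the file. A small sketch (the CSV content is invented):

```ts
import { Readable } from "node:stream";
import { parse } from "csv-parse";

// Pipe raw CSV through the Transform; each readable chunk is one parsed record.
const parser = Readable.from(["a,b\n1,2\n3,4\n"]).pipe(parse());

parser.on("readable", () => {
  let record;
  while ((record = parser.read()) !== null) {
    console.log(record); // ['a','b'], then ['1','2'], then ['3','4']
  }
});
parser.on("error", (err) => console.error(err));
```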
17
Jira_helper/node_modules/csv-parse/lib/stream.d.ts
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
import { Options } from "./index.js";
|
||||
|
||||
declare function parse(options?: Options): TransformStream;
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
export {
|
||||
CastingContext,
|
||||
CastingFunction,
|
||||
CastingDateFunction,
|
||||
ColumnOption,
|
||||
Options,
|
||||
OptionsNormalized,
|
||||
Info,
|
||||
CsvErrorCode,
|
||||
CsvError,
|
||||
} from "./index.js";
|
||||
36
Jira_helper/node_modules/csv-parse/lib/stream.js
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
import { TransformStream, CountQueuingStrategy } from "node:stream/web";
|
||||
import { transform } from "./api/index.js";
|
||||
|
||||
const parse = (opts) => {
|
||||
const api = transform(opts);
|
||||
let controller;
|
||||
const enqueue = (record) => {
|
||||
controller.enqueue(record);
|
||||
};
|
||||
const terminate = () => {
|
||||
controller.terminate();
|
||||
};
|
||||
return new TransformStream(
|
||||
{
|
||||
start(ctr) {
|
||||
controller = ctr;
|
||||
},
|
||||
transform(chunk) {
|
||||
const error = api.parse(chunk, false, enqueue, terminate);
|
||||
if (error) {
|
||||
controller.error(error);
|
||||
}
|
||||
},
|
||||
flush() {
|
||||
const error = api.parse(undefined, true, enqueue, terminate);
|
||||
if (error) {
|
||||
controller.error(error);
|
||||
}
|
||||
},
|
||||
},
|
||||
new CountQueuingStrategy({ highWaterMark: 1024 }),
|
||||
new CountQueuingStrategy({ highWaterMark: 1024 }),
|
||||
);
|
||||
};
|
||||
|
||||
export { parse };
|
||||
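This `./stream` entry wraps the same parsing engine in a WHATWG `TransformStream`, so it composes with web streams through `pipeThrough`. A sketch assuming Node 18+ (where `Blob` and web streams are global) and an ES module context for the top-level `for await`:

```ts
import { parse } from "csv-parse/stream";

// The readable side of the TransformStream yields one record per chunk.
const source = new Blob(["a,b\n1,2\n3,4\n"]).stream();
const records = source.pipeThrough(parse({ columns: true }));

for await (const record of records) {
  console.log(record); // { a: '1', b: '2' }, then { a: '3', b: '4' }
}
```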
30
Jira_helper/node_modules/csv-parse/lib/sync.d.ts
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
import { Options } from "./index.js";
|
||||
|
||||
type OptionsWithColumns<T> = Omit<Options<T>, "columns"> & {
|
||||
columns: Exclude<Options["columns"], undefined | false>;
|
||||
};
|
||||
|
||||
declare function parse<T = unknown>(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: OptionsWithColumns<T>,
|
||||
): T[];
|
||||
declare function parse(
|
||||
input: Buffer | string | Uint8Array,
|
||||
options: Options,
|
||||
): string[][];
|
||||
declare function parse(input: Buffer | string | Uint8Array): string[][];
|
||||
|
||||
// export default parse;
|
||||
export { parse };
|
||||
|
||||
export {
|
||||
CastingContext,
|
||||
CastingFunction,
|
||||
CastingDateFunction,
|
||||
ColumnOption,
|
||||
Options,
|
||||
OptionsNormalized,
|
||||
Info,
|
||||
CsvErrorCode,
|
||||
CsvError,
|
||||
} from "./index.js";
|
||||
28
Jira_helper/node_modules/csv-parse/lib/sync.js
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
import { CsvError, transform } from "./api/index.js";
|
||||
|
||||
const parse = function (data, opts = {}) {
|
||||
if (typeof data === "string") {
|
||||
data = Buffer.from(data);
|
||||
}
|
||||
const records = opts && opts.objname ? {} : [];
|
||||
const parser = transform(opts);
|
||||
const push = (record) => {
|
||||
if (parser.options.objname === undefined) records.push(record);
|
||||
else {
|
||||
records[record[0]] = record[1];
|
||||
}
|
||||
};
|
||||
const close = () => {};
|
||||
const error = parser.parse(data, true, push, close);
|
||||
if (error !== undefined) throw error;
|
||||
// 250606: `parser.parse` was implemented as 2 calls:
|
||||
// const err1 = parser.parse(data, false, push, close);
|
||||
// if (err1 !== undefined) throw err1;
|
||||
// const err2 = parser.parse(undefined, true, push, close);
|
||||
// if (err2 !== undefined) throw err2;
|
||||
return records;
|
||||
};
|
||||
|
||||
// export default parse
|
||||
export { parse };
|
||||
export { CsvError };
|
||||
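The synchronous variant parses the whole input in a single call and returns the records directly, which is usually the simplest fit for small inputs. A minimal sketch:

```ts
import { parse } from "csv-parse/sync";

// Everything is parsed eagerly; no streams or callbacks involved.
const records = parse("a,b\n1,2\n3,4\n", { columns: true });
console.log(records); // [ { a: '1', b: '2' }, { a: '3', b: '4' } ]
```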
63
Jira_helper/node_modules/csv-parse/lib/utils/ResizeableBuffer.js
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
class ResizeableBuffer {
|
||||
constructor(size = 100) {
|
||||
this.size = size;
|
||||
this.length = 0;
|
||||
this.buf = Buffer.allocUnsafe(size);
|
||||
}
|
||||
prepend(val) {
|
||||
if (Buffer.isBuffer(val)) {
|
||||
const length = this.length + val.length;
|
||||
if (length >= this.size) {
|
||||
this.resize();
|
||||
if (length >= this.size) {
|
||||
throw Error("INVALID_BUFFER_STATE");
|
||||
}
|
||||
}
|
||||
const buf = this.buf;
|
||||
this.buf = Buffer.allocUnsafe(this.size);
|
||||
val.copy(this.buf, 0);
|
||||
buf.copy(this.buf, val.length);
|
||||
this.length += val.length;
|
||||
} else {
|
||||
const length = this.length++;
|
||||
if (length === this.size) {
|
||||
this.resize();
|
||||
}
|
||||
const buf = this.clone();
|
||||
this.buf[0] = val;
|
||||
buf.copy(this.buf, 1, 0, length);
|
||||
}
|
||||
}
|
||||
append(val) {
|
||||
const length = this.length++;
|
||||
if (length === this.size) {
|
||||
this.resize();
|
||||
}
|
||||
this.buf[length] = val;
|
||||
}
|
||||
clone() {
|
||||
return Buffer.from(this.buf.slice(0, this.length));
|
||||
}
|
||||
resize() {
|
||||
const length = this.length;
|
||||
this.size = this.size * 2;
|
||||
const buf = Buffer.allocUnsafe(this.size);
|
||||
this.buf.copy(buf, 0, 0, length);
|
||||
this.buf = buf;
|
||||
}
|
||||
toString(encoding) {
|
||||
if (encoding) {
|
||||
return this.buf.slice(0, this.length).toString(encoding);
|
||||
} else {
|
||||
return Uint8Array.prototype.slice.call(this.buf.slice(0, this.length));
|
||||
}
|
||||
}
|
||||
toJSON() {
|
||||
return this.toString("utf8");
|
||||
}
|
||||
reset() {
|
||||
this.length = 0;
|
||||
}
|
||||
}
|
||||
|
||||
export default ResizeableBuffer;
|
||||
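`ResizeableBuffer` is an internal accumulator: whenever an append or prepend would overflow the backing `Buffer`, `resize()` doubles it, so building a field byte by byte stays amortised O(1). A sketch of that behaviour (the relative import assumes the `lib/utils` directory; the class is not part of the public API):

```ts
import ResizeableBuffer from "./ResizeableBuffer.js";

const rb = new ResizeableBuffer(4);          // 4-byte backing buffer to start
for (const byte of Buffer.from("hello")) {
  rb.append(byte);                           // the 5th byte triggers resize() to 8 bytes
}
console.log(rb.toString("utf8"));            // "hello"
rb.reset();                                  // length back to 0, backing buffer kept
```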
5
Jira_helper/node_modules/csv-parse/lib/utils/is_object.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
const is_object = function (obj) {
|
||||
return typeof obj === "object" && obj !== null && !Array.isArray(obj);
|
||||
};
|
||||
|
||||
export { is_object };
|
||||
7
Jira_helper/node_modules/csv-parse/lib/utils/underscore.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
const underscore = function (str) {
|
||||
return str.replace(/([A-Z])/g, function (_, match) {
|
||||
return "_" + match.toLowerCase();
|
||||
});
|
||||
};
|
||||
|
||||
export { underscore };
|
||||
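`underscore` converts a camel-cased name to its snake_cased form, which is presumably how aliases such as `relaxColumnCount` in the `Options` interface line up with the snake_cased keys used internally (an assumption based on the alias pairs in `index.d.ts`). A quick check of its behaviour (relative, internal import again):

```ts
import { underscore } from "./underscore.js";

console.log(underscore("relaxColumnCount")); // "relax_column_count"
console.log(underscore("skipEmptyLines"));   // "skip_empty_lines"
```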
144
Jira_helper/node_modules/csv-parse/package.json
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
{
|
||||
"version": "6.1.0",
|
||||
"name": "csv-parse",
|
||||
"description": "CSV parsing implementing the Node.js `stream.Transform` API",
|
||||
"keywords": [
|
||||
"csv",
|
||||
"parse",
|
||||
"parser",
|
||||
"convert",
|
||||
"tsv",
|
||||
"stream",
|
||||
"backend",
|
||||
"frontend"
|
||||
],
|
||||
"author": "David Worms <david@adaltas.com> (https://www.adaltas.com)",
|
||||
"contributors": [
|
||||
"David Worms <david@adaltas.com> (https://www.adaltas.com)",
|
||||
"Will White (https://github.com/willwhite)",
|
||||
"Justin Latimer (https://github.com/justinlatimer)",
|
||||
"jonseymour (https://github.com/jonseymour)",
|
||||
"pascalopitz (https://github.com/pascalopitz)",
|
||||
"Josh Pschorr (https://github.com/jpschorr)",
|
||||
"Elad Ben-Israel (https://github.com/eladb)",
|
||||
"Philippe Plantier (https://github.com/phipla)",
|
||||
"Tim Oxley (https://github.com/timoxley)",
|
||||
"Damon Oehlman (https://github.com/DamonOehlman)",
|
||||
"Alexandru Topliceanu (https://github.com/topliceanu)",
|
||||
"Visup (https://github.com/visup)",
|
||||
"Edmund von der Burg (https://github.com/evdb)",
|
||||
"Douglas Christopher Wilson (https://github.com/dougwilson)",
|
||||
"Joe Eaves (https://github.com/Joeasaurus)",
|
||||
"Mark Stosberg (https://github.com/markstos)"
|
||||
],
|
||||
"exports": {
|
||||
".": {
|
||||
"import": {
|
||||
"types": "./lib/index.d.ts",
|
||||
"default": "./lib/index.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/cjs/index.d.cts",
|
||||
"default": "./dist/cjs/index.cjs"
|
||||
}
|
||||
},
|
||||
"./sync": {
|
||||
"import": {
|
||||
"types": "./lib/sync.d.ts",
|
||||
"default": "./lib/sync.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/cjs/sync.d.cts",
|
||||
"default": "./dist/cjs/sync.cjs"
|
||||
}
|
||||
},
|
||||
"./stream": {
|
||||
"import": {
|
||||
"types": "./lib/stream.d.ts",
|
||||
"default": "./lib/stream.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/cjs/stream.d.cts",
|
||||
"default": "./dist/cjs/stream.cjs"
|
||||
}
|
||||
},
|
||||
"./browser/esm": {
|
||||
"types": "./dist/esm/index.d.ts",
|
||||
"default": "./dist/esm/index.js"
|
||||
},
|
||||
"./browser/esm/sync": {
|
||||
"types": "./dist/esm/sync.d.ts",
|
||||
"default": "./dist/esm/sync.js"
|
||||
}
|
||||
},
|
||||
"devDependencies": {
|
||||
"@rollup/plugin-node-resolve": "^16.0.1",
|
||||
"@types/mocha": "^10.0.10",
|
||||
"@types/node": "^22.15.30",
|
||||
"csv-generate": "^4.5.0",
|
||||
"csv-spectrum": "^2.0.0",
|
||||
"dedent": "^1.6.0",
|
||||
"each": "^2.7.2",
|
||||
"mocha": "^11.5.0",
|
||||
"pad": "^3.3.0",
|
||||
"prettier": "^3.5.3",
|
||||
"rollup": "^4.41.1",
|
||||
"rollup-plugin-node-builtins": "^2.1.2",
|
||||
"rollup-plugin-node-globals": "^1.4.0",
|
||||
"should": "^13.2.3",
|
||||
"stream-transform": "^3.4.0",
|
||||
"ts-node": "^10.9.2",
|
||||
"typescript": "^5.8.3"
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"lib"
|
||||
],
|
||||
"homepage": "https://csv.js.org/parse",
|
||||
"license": "MIT",
|
||||
"main": "./dist/cjs/index.cjs",
|
||||
"mocha": {
|
||||
"inline-diffs": true,
|
||||
"loader": "ts-node/esm",
|
||||
"recursive": true,
|
||||
"reporter": "spec",
|
||||
"throw-deprecation": false,
|
||||
"timeout": 40000
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/adaltas/node-csv.git",
|
||||
"directory": "packages/csv-parse"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "npm run build:rollup && npm run build:ts",
|
||||
"build:rollup": "npx rollup -c",
|
||||
"build:ts": "cp lib/index.d.ts dist/cjs/index.d.cts && cp lib/sync.d.ts dist/cjs/sync.d.cts && cp lib/*.ts dist/esm",
|
||||
"postbuild:ts": "find dist/cjs -name '*.d.cts' -exec sh -c \"sed -i \"s/\\.js'/\\.cjs'/g\" {} || sed -i '' \"s/\\.js'/\\.cjs'/g\" {}\" \\;",
|
||||
"lint:check": "eslint",
|
||||
"lint:fix": "eslint --fix",
|
||||
"lint:ts": "tsc --noEmit true",
|
||||
"preversion": "npm run build && git add dist",
|
||||
"test": "mocha 'test/**/*.{js,ts}'",
|
||||
"test:legacy": "mocha --ignore test/api.web_stream.js --ignore test/api.web_stream.ts --ignore test/api.stream.finished.ts --ignore test/api.stream.iterator.ts 'test/**/*.{js,ts}'"
|
||||
},
|
||||
"type": "module",
|
||||
"types": "dist/esm/index.d.ts",
|
||||
"typesVersions": {
|
||||
"*": {
|
||||
".": [
|
||||
"dist/esm/index.d.ts"
|
||||
],
|
||||
"sync": [
|
||||
"dist/esm/sync.d.ts"
|
||||
],
|
||||
"browser/esm": [
|
||||
"dist/esm/index.d.ts"
|
||||
],
|
||||
"browser/esm/sync": [
|
||||
"dist/esm/sync.d.ts"
|
||||
]
|
||||
}
|
||||
},
|
||||
"gitHead": "c0e42c9cf0ec6b9e1f453279e36f621ce782d2eb"
|
||||
}
|
||||
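The `exports` map above is what makes the entry points used in the earlier sketches resolvable; each subpath selects an ESM or CJS build plus matching type definitions:

```ts
import { parse } from "csv-parse";                      // "."        -> lib/index.js (ESM) or dist/cjs/index.cjs
import { parse as parseSync } from "csv-parse/sync";    // "./sync"   -> lib/sync.js or dist/cjs/sync.cjs
import { parse as parseWeb } from "csv-parse/stream";   // "./stream" -> lib/stream.js or dist/cjs/stream.cjs
```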
20
Jira_helper/node_modules/emoji-regex/LICENSE-MIT.txt
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
Copyright Mathias Bynens <https://mathiasbynens.be/>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
107
Jira_helper/node_modules/emoji-regex/README.md
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
# emoji-regex [](https://github.com/mathiasbynens/emoji-regex/actions/workflows/main.yml) [](https://www.npmjs.com/package/emoji-regex)
|
||||
|
||||
_emoji-regex_ offers a regular expression to match all emoji symbols and sequences (including textual representations of emoji) as per the Unicode Standard. It’s based on [_emoji-test-regex-pattern_](https://github.com/mathiasbynens/emoji-test-regex-pattern), which generates (at build time) the regular expression pattern based on the Unicode Standard. As a result, _emoji-regex_ can easily be updated whenever new emoji are added to Unicode.
|
||||
|
||||
Since each version of _emoji-regex_ is tied to the latest Unicode version at the time of release, results are deterministic. This is important for use cases like image replacement, where you want to guarantee that an image asset is available for every possibly matched emoji. If you don’t need a deterministic regex, a lighter-weight, general emoji pattern is available via the [_emoji-regex-xs_](https://github.com/slevithan/emoji-regex-xs) package that follows the same API.
|
||||
|
||||
## Installation
|
||||
|
||||
Via [npm](https://www.npmjs.com/):
|
||||
|
||||
```bash
|
||||
npm install emoji-regex
|
||||
```
|
||||
|
||||
In [Node.js](https://nodejs.org/):
|
||||
|
||||
```js
|
||||
const emojiRegex = require('emoji-regex');
|
||||
// Note: because the regular expression has the global flag set, this module
|
||||
// exports a function that returns the regex rather than exporting the regular
|
||||
// expression itself, to make it impossible to (accidentally) mutate the
|
||||
// original regular expression.
|
||||
|
||||
const text = `
|
||||
\u{231A}: ⌚ default emoji presentation character (Emoji_Presentation)
|
||||
\u{2194}\u{FE0F}: ↔️ default text presentation character rendered as emoji
|
||||
\u{1F469}: 👩 emoji modifier base (Emoji_Modifier_Base)
|
||||
\u{1F469}\u{1F3FF}: 👩🏿 emoji modifier base followed by a modifier
|
||||
`;
|
||||
|
||||
const regex = emojiRegex();
|
||||
for (const match of text.matchAll(regex)) {
|
||||
const emoji = match[0];
|
||||
console.log(`Matched sequence ${ emoji } — code points: ${ [...emoji].length }`);
|
||||
}
|
||||
```
|
||||
|
||||
Console output:
|
||||
|
||||
```
|
||||
Matched sequence ⌚ — code points: 1
|
||||
Matched sequence ⌚ — code points: 1
|
||||
Matched sequence ↔️ — code points: 2
|
||||
Matched sequence ↔️ — code points: 2
|
||||
Matched sequence 👩 — code points: 1
|
||||
Matched sequence 👩 — code points: 1
|
||||
Matched sequence 👩🏿 — code points: 2
|
||||
Matched sequence 👩🏿 — code points: 2
|
||||
```
|
||||
|
||||
## For maintainers
|
||||
|
||||
### How to update emoji-regex after new Unicode Standard releases
|
||||
|
||||
1. [Update _emoji-test-regex-pattern_ as described in its repository](https://github.com/mathiasbynens/emoji-test-regex-pattern#how-to-update-emoji-test-regex-pattern-after-new-uts51-releases).
|
||||
|
||||
1. Bump the _emoji-test-regex-pattern_ dependency to the latest version.
|
||||
|
||||
1. Update the Unicode data dependency in `package.json` by running the following commands:
|
||||
|
||||
```sh
|
||||
# Example: updating from Unicode v13 to Unicode v14.
|
||||
npm uninstall @unicode/unicode-13.0.0
|
||||
npm install @unicode/unicode-14.0.0 --save-dev
|
||||
```
|
||||
|
||||
1. Generate the new output:
|
||||
|
||||
```sh
|
||||
npm run build
|
||||
```
|
||||
|
||||
1. Verify that tests still pass:
|
||||
|
||||
```sh
|
||||
npm test
|
||||
```
|
||||
|
||||
### How to publish a new release
|
||||
|
||||
1. On the `main` branch, bump the emoji-regex version number in `package.json`:
|
||||
|
||||
```sh
|
||||
npm version patch -m 'Release v%s'
|
||||
```
|
||||
|
||||
Instead of `patch`, use `minor` or `major` [as needed](https://semver.org/).
|
||||
|
||||
Note that this produces a Git commit + tag.
|
||||
|
||||
1. Push the release commit and tag:
|
||||
|
||||
```sh
|
||||
git push && git push --tags
|
||||
```
|
||||
|
||||
Our CI then automatically publishes the new release to npm.
|
||||
|
||||
## Author
|
||||
|
||||
| [](https://twitter.com/mathias "Follow @mathias on Twitter") |
|
||||
|---|
|
||||
| [Mathias Bynens](https://mathiasbynens.be/) |
|
||||
|
||||
## License
|
||||
|
||||
_emoji-regex_ is available under the [MIT](https://mths.be/mit) license.
|
||||
3
Jira_helper/node_modules/emoji-regex/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
declare module 'emoji-regex' {
|
||||
export default function emojiRegex(): RegExp;
|
||||
}
|
||||
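The typing above exposes a single factory; each call returns a fresh `RegExp` with the global flag set, which is why the README example calls `emojiRegex()` rather than importing a regex object. A minimal ESM counterpart to that example:

```ts
import emojiRegex from "emoji-regex";

const regex = emojiRegex();
console.log(regex.test("Ticket created 🎫")); // true
```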
4
Jira_helper/node_modules/emoji-regex/index.js
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
4
Jira_helper/node_modules/emoji-regex/index.mjs
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
45
Jira_helper/node_modules/emoji-regex/package.json
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"name": "emoji-regex",
|
||||
"version": "10.5.0",
|
||||
"description": "A regular expression to match all Emoji-only symbols as per the Unicode Standard.",
|
||||
"homepage": "https://mths.be/emoji-regex",
|
||||
"main": "index.js",
|
||||
"module": "index.mjs",
|
||||
"types": "index.d.ts",
|
||||
"keywords": [
|
||||
"unicode",
|
||||
"regex",
|
||||
"regexp",
|
||||
"regular expressions",
|
||||
"code points",
|
||||
"symbols",
|
||||
"characters",
|
||||
"emoji"
|
||||
],
|
||||
"license": "MIT",
|
||||
"author": {
|
||||
"name": "Mathias Bynens",
|
||||
"url": "https://mathiasbynens.be/"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/mathiasbynens/emoji-regex.git"
|
||||
},
|
||||
"bugs": "https://github.com/mathiasbynens/emoji-regex/issues",
|
||||
"files": [
|
||||
"LICENSE-MIT.txt",
|
||||
"index.js",
|
||||
"index.d.ts",
|
||||
"index.mjs"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "node script/build.js",
|
||||
"test": "mocha",
|
||||
"test:watch": "npm run test -- --watch"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@unicode/unicode-17.0.0": "^1.6.10",
|
||||
"emoji-test-regex-pattern": "^2.3.0",
|
||||
"mocha": "^11.7.1"
|
||||
}
|
||||
}
|
||||
22
Jira_helper/node_modules/escalade/dist/index.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
const { dirname, resolve } = require('path');
|
||||
const { readdir, stat } = require('fs');
|
||||
const { promisify } = require('util');
|
||||
|
||||
const toStats = promisify(stat);
|
||||
const toRead = promisify(readdir);
|
||||
|
||||
module.exports = async function (start, callback) {
|
||||
let dir = resolve('.', start);
|
||||
let tmp, stats = await toStats(dir);
|
||||
|
||||
if (!stats.isDirectory()) {
|
||||
dir = dirname(dir);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
tmp = await callback(dir, await toRead(dir));
|
||||
if (tmp) return resolve(dir, tmp);
|
||||
dir = dirname(tmp = dir);
|
||||
if (tmp === dir) break;
|
||||
}
|
||||
}
|
||||
22
Jira_helper/node_modules/escalade/dist/index.mjs
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
import { dirname, resolve } from 'path';
|
||||
import { readdir, stat } from 'fs';
|
||||
import { promisify } from 'util';
|
||||
|
||||
const toStats = promisify(stat);
|
||||
const toRead = promisify(readdir);
|
||||
|
||||
export default async function (start, callback) {
|
||||
let dir = resolve('.', start);
|
||||
let tmp, stats = await toStats(dir);
|
||||
|
||||
if (!stats.isDirectory()) {
|
||||
dir = dirname(dir);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
tmp = await callback(dir, await toRead(dir));
|
||||
if (tmp) return resolve(dir, tmp);
|
||||
dir = dirname(tmp = dir);
|
||||
if (tmp === dir) break;
|
||||
}
|
||||
}
|
||||
11
Jira_helper/node_modules/escalade/index.d.mts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
type Promisable<T> = T | Promise<T>;
|
||||
|
||||
export type Callback = (
|
||||
directory: string,
|
||||
files: string[],
|
||||
) => Promisable<string | false | void>;
|
||||
|
||||
export default function (
|
||||
directory: string,
|
||||
callback: Callback,
|
||||
): Promise<string | void>;
|
||||
15
Jira_helper/node_modules/escalade/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
type Promisable<T> = T | Promise<T>;
|
||||
|
||||
declare namespace escalade {
|
||||
export type Callback = (
|
||||
directory: string,
|
||||
files: string[],
|
||||
) => Promisable<string | false | void>;
|
||||
}
|
||||
|
||||
declare function escalade(
|
||||
directory: string,
|
||||
callback: escalade.Callback,
|
||||
): Promise<string | void>;
|
||||
|
||||
export = escalade;
|
||||
9
Jira_helper/node_modules/escalade/license
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Luke Edwards <luke.edwards05@gmail.com> (lukeed.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
74
Jira_helper/node_modules/escalade/package.json
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
{
|
||||
"name": "escalade",
|
||||
"version": "3.2.0",
|
||||
"repository": "lukeed/escalade",
|
||||
"description": "A tiny (183B to 210B) and fast utility to ascend parent directories",
|
||||
"module": "dist/index.mjs",
|
||||
"main": "dist/index.js",
|
||||
"types": "index.d.ts",
|
||||
"license": "MIT",
|
||||
"author": {
|
||||
"name": "Luke Edwards",
|
||||
"email": "luke.edwards05@gmail.com",
|
||||
"url": "https://lukeed.com"
|
||||
},
|
||||
"exports": {
|
||||
".": [
|
||||
{
|
||||
"import": {
|
||||
"types": "./index.d.mts",
|
||||
"default": "./dist/index.mjs"
|
||||
},
|
||||
"require": {
|
||||
"types": "./index.d.ts",
|
||||
"default": "./dist/index.js"
|
||||
}
|
||||
},
|
||||
"./dist/index.js"
|
||||
],
|
||||
"./sync": [
|
||||
{
|
||||
"import": {
|
||||
"types": "./sync/index.d.mts",
|
||||
"default": "./sync/index.mjs"
|
||||
},
|
||||
"require": {
|
||||
"types": "./sync/index.d.ts",
|
||||
"default": "./sync/index.js"
|
||||
}
|
||||
},
|
||||
"./sync/index.js"
|
||||
]
|
||||
},
|
||||
"files": [
|
||||
"*.d.mts",
|
||||
"*.d.ts",
|
||||
"dist",
|
||||
"sync"
|
||||
],
|
||||
"modes": {
|
||||
"sync": "src/sync.js",
|
||||
"default": "src/async.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "bundt",
|
||||
"pretest": "npm run build",
|
||||
"test": "uvu -r esm test -i fixtures"
|
||||
},
|
||||
"keywords": [
|
||||
"find",
|
||||
"parent",
|
||||
"parents",
|
||||
"directory",
|
||||
"search",
|
||||
"walk"
|
||||
],
|
||||
"devDependencies": {
|
||||
"bundt": "1.1.1",
|
||||
"esm": "3.2.25",
|
||||
"uvu": "0.3.3"
|
||||
}
|
||||
}
|
||||
211
Jira_helper/node_modules/escalade/readme.md
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
# escalade [](https://github.com/lukeed/escalade/actions) [](https://licenses.dev/npm/escalade) [](https://codecov.io/gh/lukeed/escalade)
|
||||
|
||||
> A tiny (183B to 210B) and [fast](#benchmarks) utility to ascend parent directories
|
||||
|
||||
With [escalade](https://en.wikipedia.org/wiki/Escalade), you can scale parent directories until you've found what you're looking for.<br>Given an input file or directory, `escalade` will continue executing your callback function until either:
|
||||
|
||||
1) the callback returns a truthy value
|
||||
2) `escalade` has reached the system root directory (eg, `/`)
|
||||
|
||||
> **Important:**<br>Please note that `escalade` only deals with direct ancestry – it will not dive into parents' sibling directories.
|
||||
|
||||
---
|
||||
|
||||
**Notice:** As of v3.1.0, `escalade` now includes [Deno support](http://deno.land/x/escalade)! Please see [Deno Usage](#deno) below.
|
||||
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
```
|
||||
$ npm install --save escalade
|
||||
```
|
||||
|
||||
|
||||
## Modes
|
||||
|
||||
There are two "versions" of `escalade` available:
|
||||
|
||||
#### "async"
|
||||
> **Node.js:** >= 8.x<br>
|
||||
> **Size (gzip):** 210 bytes<br>
|
||||
> **Availability:** [CommonJS](https://unpkg.com/escalade/dist/index.js), [ES Module](https://unpkg.com/escalade/dist/index.mjs)
|
||||
|
||||
This is the primary/default mode. It makes use of `async`/`await` and [`util.promisify`](https://nodejs.org/api/util.html#util_util_promisify_original).
|
||||
|
||||
#### "sync"
|
||||
> **Node.js:** >= 6.x<br>
|
||||
> **Size (gzip):** 183 bytes<br>
|
||||
> **Availability:** [CommonJS](https://unpkg.com/escalade/sync/index.js), [ES Module](https://unpkg.com/escalade/sync/index.mjs)
|
||||
|
||||
This is the opt-in mode, ideal for scenarios where `async` usage cannot be supported.
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
***Example Structure***
|
||||
|
||||
```
|
||||
/Users/lukeed
|
||||
└── oss
|
||||
├── license
|
||||
└── escalade
|
||||
├── package.json
|
||||
└── test
|
||||
└── fixtures
|
||||
├── index.js
|
||||
└── foobar
|
||||
└── demo.js
|
||||
```
|
||||
|
||||
***Example Usage***
|
||||
|
||||
```js
|
||||
//~> demo.js
|
||||
import { join } from 'path';
|
||||
import escalade from 'escalade';
|
||||
|
||||
const input = join(__dirname, 'demo.js');
|
||||
// or: const input = __dirname;
|
||||
|
||||
const pkg = await escalade(input, (dir, names) => {
|
||||
console.log('~> dir:', dir);
|
||||
console.log('~> names:', names);
|
||||
console.log('---');
|
||||
|
||||
if (names.includes('package.json')) {
|
||||
// will be resolved into absolute
|
||||
return 'package.json';
|
||||
}
|
||||
});
|
||||
|
||||
//~> dir: /Users/lukeed/oss/escalade/test/fixtures/foobar
|
||||
//~> names: ['demo.js']
|
||||
//---
|
||||
//~> dir: /Users/lukeed/oss/escalade/test/fixtures
|
||||
//~> names: ['index.js', 'foobar']
|
||||
//---
|
||||
//~> dir: /Users/lukeed/oss/escalade/test
|
||||
//~> names: ['fixtures']
|
||||
//---
|
||||
//~> dir: /Users/lukeed/oss/escalade
|
||||
//~> names: ['package.json', 'test']
|
||||
//---
|
||||
|
||||
console.log(pkg);
|
||||
//=> /Users/lukeed/oss/escalade/package.json
|
||||
|
||||
// Now search for "missing123.txt"
|
||||
// (Assume it doesn't exist anywhere!)
|
||||
const missing = await escalade(input, (dir, names) => {
|
||||
console.log('~> dir:', dir);
|
||||
return names.includes('missing123.txt') && 'missing123.txt';
|
||||
});
|
||||
|
||||
//~> dir: /Users/lukeed/oss/escalade/test/fixtures/foobar
|
||||
//~> dir: /Users/lukeed/oss/escalade/test/fixtures
|
||||
//~> dir: /Users/lukeed/oss/escalade/test
|
||||
//~> dir: /Users/lukeed/oss/escalade
|
||||
//~> dir: /Users/lukeed/oss
|
||||
//~> dir: /Users/lukeed
|
||||
//~> dir: /Users
|
||||
//~> dir: /
|
||||
|
||||
console.log(missing);
|
||||
//=> undefined
|
||||
```
|
||||
|
||||
> **Note:** To run the above example with "sync" mode, import from `escalade/sync` and remove the `await` keyword.
|
||||
|
||||
|
||||
## API
|
||||
|
||||
### escalade(input, callback)
|
||||
Returns: `string|void` or `Promise<string|void>`
|
||||
|
||||
When your `callback` locates a file, `escalade` will resolve/return with an absolute path.<br>
|
||||
If your `callback` was never satisfied, then `escalade` will resolve/return with nothing (undefined).
|
||||
|
||||
> **Important:**<br>The `sync` and `async` versions share the same API.<br>The **only** difference is that `sync` is not Promise-based.
|
||||
|
||||
#### input
|
||||
Type: `string`
|
||||
|
||||
The path from which to start ascending.
|
||||
|
||||
This may be a file or a directory path.<br>However, when `input` is a file, `escalade` will begin with its parent directory.
|
||||
|
||||
> **Important:** Unless given an absolute path, `input` will be resolved from `process.cwd()` location.
|
||||
|
||||
#### callback
|
||||
Type: `Function`
|
||||
|
||||
The callback to execute for each ancestry level. It is always given two arguments:
|
||||
|
||||
1) `dir` - an absolute path of the current parent directory
|
||||
2) `names` - a list (`string[]`) of contents _relative to_ the `dir` parent
|
||||
|
||||
> **Note:** The `names` list can contain names of files _and_ directories.
|
||||
|
||||
When your callback returns a _falsey_ value, then `escalade` will continue with `dir`'s parent directory, re-invoking your callback with new argument values.
|
||||
|
||||
When your callback returns a string, then `escalade` stops iteration immediately.<br>
|
||||
If the string is an absolute path, then it's left as is. Otherwise, the string is resolved into an absolute path _from_ the `dir` that housed the satisfying condition.
|
||||
|
||||
> **Important:** Your `callback` can be a `Promise/AsyncFunction` when using the "async" version of `escalade`.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
> Running on Node.js v10.13.0
|
||||
|
||||
```
|
||||
# Load Time
|
||||
find-up 3.891ms
|
||||
escalade 0.485ms
|
||||
escalade/sync 0.309ms
|
||||
|
||||
# Levels: 6 (target = "foo.txt"):
|
||||
find-up x 24,856 ops/sec ±6.46% (55 runs sampled)
|
||||
escalade x 73,084 ops/sec ±4.23% (73 runs sampled)
|
||||
find-up.sync x 3,663 ops/sec ±1.12% (83 runs sampled)
|
||||
escalade/sync x 9,360 ops/sec ±0.62% (88 runs sampled)
|
||||
|
||||
# Levels: 12 (target = "package.json"):
|
||||
find-up x 29,300 ops/sec ±10.68% (70 runs sampled)
|
||||
escalade x 73,685 ops/sec ± 5.66% (66 runs sampled)
|
||||
find-up.sync x 1,707 ops/sec ± 0.58% (91 runs sampled)
|
||||
escalade/sync x 4,667 ops/sec ± 0.68% (94 runs sampled)
|
||||
|
||||
# Levels: 18 (target = "missing123.txt"):
|
||||
find-up x 21,818 ops/sec ±17.37% (14 runs sampled)
|
||||
escalade x 67,101 ops/sec ±21.60% (20 runs sampled)
|
||||
find-up.sync x 1,037 ops/sec ± 2.86% (88 runs sampled)
|
||||
escalade/sync x 1,248 ops/sec ± 0.50% (93 runs sampled)
|
||||
```
|
||||
|
||||
## Deno
|
||||
|
||||
As of v3.1.0, `escalade` is available on the Deno registry.
|
||||
|
||||
Please note that the [API](#api) is identical and that there are still [two modes](#modes) from which to choose:
|
||||
|
||||
```ts
|
||||
// Choose "async" mode
|
||||
import escalade from 'https://deno.land/escalade/async.ts';
|
||||
|
||||
// Choose "sync" mode
|
||||
import escalade from 'https://deno.land/escalade/sync.ts';
|
||||
```
|
||||
|
||||
> **Important:** The `allow-read` permission is required!
|
||||
|
||||
|
||||
## Related
|
||||
|
||||
- [premove](https://github.com/lukeed/premove) - A tiny (247B) utility to remove items recursively
|
||||
- [totalist](https://github.com/lukeed/totalist) - A tiny (195B to 224B) utility to recursively list all (total) files in a directory
|
||||
- [mk-dirs](https://github.com/lukeed/mk-dirs) - A tiny (420B) utility to make a directory and its parents, recursively
|
||||
|
||||
## License
|
||||
|
||||
MIT © [Luke Edwards](https://lukeed.com)
|
||||
9
Jira_helper/node_modules/escalade/sync/index.d.mts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
export type Callback = (
|
||||
directory: string,
|
||||
files: string[],
|
||||
) => string | false | void;
|
||||
|
||||
export default function (
|
||||
directory: string,
|
||||
callback: Callback,
|
||||
): string | void;
|
||||
13
Jira_helper/node_modules/escalade/sync/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
declare namespace escalade {
|
||||
export type Callback = (
|
||||
directory: string,
|
||||
files: string[],
|
||||
) => string | false | void;
|
||||
}
|
||||
|
||||
declare function escalade(
|
||||
directory: string,
|
||||
callback: escalade.Callback,
|
||||
): string | void;
|
||||
|
||||
export = escalade;
|
||||
18
Jira_helper/node_modules/escalade/sync/index.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
const { dirname, resolve } = require('path');
|
||||
const { readdirSync, statSync } = require('fs');
|
||||
|
||||
module.exports = function (start, callback) {
|
||||
let dir = resolve('.', start);
|
||||
let tmp, stats = statSync(dir);
|
||||
|
||||
if (!stats.isDirectory()) {
|
||||
dir = dirname(dir);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
tmp = callback(dir, readdirSync(dir));
|
||||
if (tmp) return resolve(dir, tmp);
|
||||
dir = dirname(tmp = dir);
|
||||
if (tmp === dir) break;
|
||||
}
|
||||
}
|
||||
18
Jira_helper/node_modules/escalade/sync/index.mjs
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
import { dirname, resolve } from 'path';
|
||||
import { readdirSync, statSync } from 'fs';
|
||||
|
||||
export default function (start, callback) {
|
||||
let dir = resolve('.', start);
|
||||
let tmp, stats = statSync(dir);
|
||||
|
||||
if (!stats.isDirectory()) {
|
||||
dir = dirname(dir);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
tmp = callback(dir, readdirSync(dir));
|
||||
if (tmp) return resolve(dir, tmp);
|
||||
dir = dirname(tmp = dir);
|
||||
if (tmp === dir) break;
|
||||
}
|
||||
}
|
||||
6
Jira_helper/node_modules/get-caller-file/LICENSE.md
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
ISC License (ISC)
|
||||
Copyright 2018 Stefan Penner
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
41
Jira_helper/node_modules/get-caller-file/README.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
# get-caller-file
|
||||
|
||||
[](https://travis-ci.org/stefanpenner/get-caller-file)
|
||||
[](https://ci.appveyor.com/project/embercli/get-caller-file/branch/master)
|
||||
|
||||
This is a utility that allows a function to figure out the file from which it was invoked. It does so by inspecting V8's stack trace at the time it is invoked.
|
||||
|
||||
Inspired by http://stackoverflow.com/questions/13227489
|
||||
|
||||
*Note: this relies on Node/V8-specific APIs; other runtimes may not work.*
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
yarn add get-caller-file
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Given:
|
||||
|
||||
```js
|
||||
// ./foo.js
|
||||
const getCallerFile = require('get-caller-file');
|
||||
|
||||
module.exports = function() {
|
||||
return getCallerFile(); // figures out who called it
|
||||
};
|
||||
```
|
||||
|
||||
```js
|
||||
// index.js
|
||||
const foo = require('./foo');
|
||||
|
||||
foo() // => /full/path/to/this/file/index.js
|
||||
```
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
* `getCallerFile(position = 2)`: where `position` is the stack frame whose fileName we want.
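As a hedged illustration (the file and function names here are hypothetical), a helper that sits one extra frame above the call site needs `position = 3` to reach the original caller:

```js
// ./logger.js (hypothetical)
const getCallerFile = require('get-caller-file');

function callerOfLogCaller() {
  // stack[0] = get-caller-file itself, stack[1] = this helper,
  // stack[2] = logCaller, stack[3] = whoever called logCaller.
  return getCallerFile(3);
}

module.exports = function logCaller() {
  console.log(callerOfLogCaller()); // prints the file that called logCaller()
};
```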
|
||||
2
Jira_helper/node_modules/get-caller-file/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
declare const _default: (position?: number) => any;
|
||||
export = _default;
|
||||
22
Jira_helper/node_modules/get-caller-file/index.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
"use strict";
|
||||
// Call this function inside another function to find out the file from
// which that function was called. (Inspects the V8 stack trace.)
|
||||
//
|
||||
// Inspired by http://stackoverflow.com/questions/13227489
|
||||
module.exports = function getCallerFile(position) {
|
||||
if (position === void 0) { position = 2; }
|
||||
if (position >= Error.stackTraceLimit) {
|
||||
throw new TypeError('getCallerFile(position) requires position be less than Error.stackTraceLimit but position was: `' + position + '` and Error.stackTraceLimit was: `' + Error.stackTraceLimit + '`');
|
||||
}
|
||||
var oldPrepareStackTrace = Error.prepareStackTrace;
|
||||
Error.prepareStackTrace = function (_, stack) { return stack; };
|
||||
var stack = new Error().stack;
|
||||
Error.prepareStackTrace = oldPrepareStackTrace;
|
||||
if (stack !== null && typeof stack === 'object') {
|
||||
// stack[0] holds this file
|
||||
// stack[1] holds where this function was called
|
||||
// stack[2] holds the file we're interested in
|
||||
return stack[position] ? stack[position].getFileName() : undefined;
|
||||
}
|
||||
};
|
||||
//# sourceMappingURL=index.js.map
|
||||
1
Jira_helper/node_modules/get-caller-file/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"index.js","sourceRoot":"","sources":["index.ts"],"names":[],"mappings":";AAAA,qEAAqE;AACrE,qEAAqE;AACrE,EAAE;AACF,0DAA0D;AAE1D,iBAAS,SAAS,aAAa,CAAC,QAAY;IAAZ,yBAAA,EAAA,YAAY;IAC1C,IAAI,QAAQ,IAAI,KAAK,CAAC,eAAe,EAAE;QACrC,MAAM,IAAI,SAAS,CAAC,kGAAkG,GAAG,QAAQ,GAAG,oCAAoC,GAAG,KAAK,CAAC,eAAe,GAAG,GAAG,CAAC,CAAC;KACzM;IAED,IAAM,oBAAoB,GAAG,KAAK,CAAC,iBAAiB,CAAC;IACrD,KAAK,CAAC,iBAAiB,GAAG,UAAC,CAAC,EAAE,KAAK,IAAM,OAAA,KAAK,EAAL,CAAK,CAAC;IAC/C,IAAM,KAAK,GAAG,IAAI,KAAK,EAAE,CAAC,KAAK,CAAC;IAChC,KAAK,CAAC,iBAAiB,GAAG,oBAAoB,CAAC;IAG/C,IAAI,KAAK,KAAK,IAAI,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE;QAC/C,2BAA2B;QAC3B,gDAAgD;QAChD,8CAA8C;QAC9C,OAAO,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAE,KAAK,CAAC,QAAQ,CAAS,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;KAC7E;AACH,CAAC,CAAC"}
|
||||
42
Jira_helper/node_modules/get-caller-file/package.json
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"name": "get-caller-file",
|
||||
"version": "2.0.5",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"directories": {
|
||||
"test": "tests"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.js.map",
|
||||
"index.d.ts"
|
||||
],
|
||||
"scripts": {
|
||||
"prepare": "tsc",
|
||||
"test": "mocha test",
|
||||
"test:debug": "mocha test"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/stefanpenner/get-caller-file.git"
|
||||
},
|
||||
"author": "Stefan Penner",
|
||||
"license": "ISC",
|
||||
"bugs": {
|
||||
"url": "https://github.com/stefanpenner/get-caller-file/issues"
|
||||
},
|
||||
"homepage": "https://github.com/stefanpenner/get-caller-file#readme",
|
||||
"devDependencies": {
|
||||
"@types/chai": "^4.1.7",
|
||||
"@types/ensure-posix-path": "^1.0.0",
|
||||
"@types/mocha": "^5.2.6",
|
||||
"@types/node": "^11.10.5",
|
||||
"chai": "^4.1.2",
|
||||
"ensure-posix-path": "^1.0.1",
|
||||
"mocha": "^5.2.0",
|
||||
"typescript": "^3.3.3333"
|
||||
},
|
||||
"engines": {
|
||||
"node": "6.* || 8.* || >= 10.*"
|
||||
}
|
||||
}
|
||||
60
Jira_helper/node_modules/get-east-asian-width/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
export type WidthType = 'fullwidth' | 'halfwidth' | 'wide' | 'narrow' | 'neutral' | 'ambiguous';
|
||||
|
||||
export type Options = {
|
||||
/**
|
||||
Whether to treat an `'ambiguous'` character as wide.
|
||||
|
||||
@default true
|
||||
|
||||
@example
|
||||
```
|
||||
import {eastAsianWidth} from 'get-east-asian-width';
|
||||
|
||||
const codePoint = '⛣'.codePointAt(0);
|
||||
|
||||
console.log(eastAsianWidth(codePoint));
|
||||
//=> 1
|
||||
|
||||
console.log(eastAsianWidth(codePoint, {ambiguousAsWide: true}));
|
||||
//=> 2
|
||||
```
|
||||
|
||||
> Ambiguous characters behave like wide or narrow characters depending on the context (language tag, script identification, associated font, source of data, or explicit markup; all can provide the context). __If the context cannot be established reliably, they should be treated as narrow characters by default.__
|
||||
> - http://www.unicode.org/reports/tr11/
|
||||
*/
|
||||
readonly ambiguousAsWide?: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
Returns the width as a number for the given code point.
|
||||
|
||||
@param codePoint - A Unicode code point.
|
||||
|
||||
@example
|
||||
```
|
||||
import {eastAsianWidth} from 'get-east-asian-width';
|
||||
|
||||
const codePoint = '字'.codePointAt(0);
|
||||
|
||||
console.log(eastAsianWidth(codePoint));
|
||||
//=> 2
|
||||
```
|
||||
*/
|
||||
export function eastAsianWidth(codePoint: number, options?: Options): 1 | 2;
|
||||
|
||||
/**
|
||||
Returns the type of “East Asian Width” for the given code point.
|
||||
|
||||
@param codePoint - A Unicode code point.
|
||||
|
||||
@example
|
||||
```
|
||||
import {eastAsianWidthType} from 'get-east-asian-width';
|
||||
|
||||
const codePoint = '字'.codePointAt(0);
|
||||
|
||||
console.log(eastAsianWidthType(codePoint));
|
||||
//=> 'wide'
|
||||
```
|
||||
*/
|
||||
export function eastAsianWidthType(codePoint: number): WidthType;
|
||||
30
Jira_helper/node_modules/get-east-asian-width/index.js
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
import {getCategory, isAmbiguous, isFullWidth, isWide} from './lookup.js';
|
||||
|
||||
function validate(codePoint) {
|
||||
if (!Number.isSafeInteger(codePoint)) {
|
||||
throw new TypeError(`Expected a code point, got \`${typeof codePoint}\`.`);
|
||||
}
|
||||
}
|
||||
|
||||
export function eastAsianWidthType(codePoint) {
|
||||
validate(codePoint);
|
||||
|
||||
return getCategory(codePoint);
|
||||
}
|
||||
|
||||
export function eastAsianWidth(codePoint, {ambiguousAsWide = false} = {}) {
|
||||
validate(codePoint);
|
||||
|
||||
if (
|
||||
isFullWidth(codePoint)
|
||||
|| isWide(codePoint)
|
||||
|| (ambiguousAsWide && isAmbiguous(codePoint))
|
||||
) {
|
||||
return 2;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Private exports for https://github.com/sindresorhus/is-fullwidth-code-point
|
||||
export {isFullWidth as _isFullWidth, isWide as _isWide} from './lookup.js';
|
||||
9
Jira_helper/node_modules/get-east-asian-width/license
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
403
Jira_helper/node_modules/get-east-asian-width/lookup.js
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
// Generated code.
|
||||
|
||||
function isAmbiguous(x) {
|
||||
return x === 0xA1
|
||||
|| x === 0xA4
|
||||
|| x === 0xA7
|
||||
|| x === 0xA8
|
||||
|| x === 0xAA
|
||||
|| x === 0xAD
|
||||
|| x === 0xAE
|
||||
|| x >= 0xB0 && x <= 0xB4
|
||||
|| x >= 0xB6 && x <= 0xBA
|
||||
|| x >= 0xBC && x <= 0xBF
|
||||
|| x === 0xC6
|
||||
|| x === 0xD0
|
||||
|| x === 0xD7
|
||||
|| x === 0xD8
|
||||
|| x >= 0xDE && x <= 0xE1
|
||||
|| x === 0xE6
|
||||
|| x >= 0xE8 && x <= 0xEA
|
||||
|| x === 0xEC
|
||||
|| x === 0xED
|
||||
|| x === 0xF0
|
||||
|| x === 0xF2
|
||||
|| x === 0xF3
|
||||
|| x >= 0xF7 && x <= 0xFA
|
||||
|| x === 0xFC
|
||||
|| x === 0xFE
|
||||
|| x === 0x101
|
||||
|| x === 0x111
|
||||
|| x === 0x113
|
||||
|| x === 0x11B
|
||||
|| x === 0x126
|
||||
|| x === 0x127
|
||||
|| x === 0x12B
|
||||
|| x >= 0x131 && x <= 0x133
|
||||
|| x === 0x138
|
||||
|| x >= 0x13F && x <= 0x142
|
||||
|| x === 0x144
|
||||
|| x >= 0x148 && x <= 0x14B
|
||||
|| x === 0x14D
|
||||
|| x === 0x152
|
||||
|| x === 0x153
|
||||
|| x === 0x166
|
||||
|| x === 0x167
|
||||
|| x === 0x16B
|
||||
|| x === 0x1CE
|
||||
|| x === 0x1D0
|
||||
|| x === 0x1D2
|
||||
|| x === 0x1D4
|
||||
|| x === 0x1D6
|
||||
|| x === 0x1D8
|
||||
|| x === 0x1DA
|
||||
|| x === 0x1DC
|
||||
|| x === 0x251
|
||||
|| x === 0x261
|
||||
|| x === 0x2C4
|
||||
|| x === 0x2C7
|
||||
|| x >= 0x2C9 && x <= 0x2CB
|
||||
|| x === 0x2CD
|
||||
|| x === 0x2D0
|
||||
|| x >= 0x2D8 && x <= 0x2DB
|
||||
|| x === 0x2DD
|
||||
|| x === 0x2DF
|
||||
|| x >= 0x300 && x <= 0x36F
|
||||
|| x >= 0x391 && x <= 0x3A1
|
||||
|| x >= 0x3A3 && x <= 0x3A9
|
||||
|| x >= 0x3B1 && x <= 0x3C1
|
||||
|| x >= 0x3C3 && x <= 0x3C9
|
||||
|| x === 0x401
|
||||
|| x >= 0x410 && x <= 0x44F
|
||||
|| x === 0x451
|
||||
|| x === 0x2010
|
||||
|| x >= 0x2013 && x <= 0x2016
|
||||
|| x === 0x2018
|
||||
|| x === 0x2019
|
||||
|| x === 0x201C
|
||||
|| x === 0x201D
|
||||
|| x >= 0x2020 && x <= 0x2022
|
||||
|| x >= 0x2024 && x <= 0x2027
|
||||
|| x === 0x2030
|
||||
|| x === 0x2032
|
||||
|| x === 0x2033
|
||||
|| x === 0x2035
|
||||
|| x === 0x203B
|
||||
|| x === 0x203E
|
||||
|| x === 0x2074
|
||||
|| x === 0x207F
|
||||
|| x >= 0x2081 && x <= 0x2084
|
||||
|| x === 0x20AC
|
||||
|| x === 0x2103
|
||||
|| x === 0x2105
|
||||
|| x === 0x2109
|
||||
|| x === 0x2113
|
||||
|| x === 0x2116
|
||||
|| x === 0x2121
|
||||
|| x === 0x2122
|
||||
|| x === 0x2126
|
||||
|| x === 0x212B
|
||||
|| x === 0x2153
|
||||
|| x === 0x2154
|
||||
|| x >= 0x215B && x <= 0x215E
|
||||
|| x >= 0x2160 && x <= 0x216B
|
||||
|| x >= 0x2170 && x <= 0x2179
|
||||
|| x === 0x2189
|
||||
|| x >= 0x2190 && x <= 0x2199
|
||||
|| x === 0x21B8
|
||||
|| x === 0x21B9
|
||||
|| x === 0x21D2
|
||||
|| x === 0x21D4
|
||||
|| x === 0x21E7
|
||||
|| x === 0x2200
|
||||
|| x === 0x2202
|
||||
|| x === 0x2203
|
||||
|| x === 0x2207
|
||||
|| x === 0x2208
|
||||
|| x === 0x220B
|
||||
|| x === 0x220F
|
||||
|| x === 0x2211
|
||||
|| x === 0x2215
|
||||
|| x === 0x221A
|
||||
|| x >= 0x221D && x <= 0x2220
|
||||
|| x === 0x2223
|
||||
|| x === 0x2225
|
||||
|| x >= 0x2227 && x <= 0x222C
|
||||
|| x === 0x222E
|
||||
|| x >= 0x2234 && x <= 0x2237
|
||||
|| x === 0x223C
|
||||
|| x === 0x223D
|
||||
|| x === 0x2248
|
||||
|| x === 0x224C
|
||||
|| x === 0x2252
|
||||
|| x === 0x2260
|
||||
|| x === 0x2261
|
||||
|| x >= 0x2264 && x <= 0x2267
|
||||
|| x === 0x226A
|
||||
|| x === 0x226B
|
||||
|| x === 0x226E
|
||||
|| x === 0x226F
|
||||
|| x === 0x2282
|
||||
|| x === 0x2283
|
||||
|| x === 0x2286
|
||||
|| x === 0x2287
|
||||
|| x === 0x2295
|
||||
|| x === 0x2299
|
||||
|| x === 0x22A5
|
||||
|| x === 0x22BF
|
||||
|| x === 0x2312
|
||||
|| x >= 0x2460 && x <= 0x24E9
|
||||
|| x >= 0x24EB && x <= 0x254B
|
||||
|| x >= 0x2550 && x <= 0x2573
|
||||
|| x >= 0x2580 && x <= 0x258F
|
||||
|| x >= 0x2592 && x <= 0x2595
|
||||
|| x === 0x25A0
|
||||
|| x === 0x25A1
|
||||
|| x >= 0x25A3 && x <= 0x25A9
|
||||
|| x === 0x25B2
|
||||
|| x === 0x25B3
|
||||
|| x === 0x25B6
|
||||
|| x === 0x25B7
|
||||
|| x === 0x25BC
|
||||
|| x === 0x25BD
|
||||
|| x === 0x25C0
|
||||
|| x === 0x25C1
|
||||
|| x >= 0x25C6 && x <= 0x25C8
|
||||
|| x === 0x25CB
|
||||
|| x >= 0x25CE && x <= 0x25D1
|
||||
|| x >= 0x25E2 && x <= 0x25E5
|
||||
|| x === 0x25EF
|
||||
|| x === 0x2605
|
||||
|| x === 0x2606
|
||||
|| x === 0x2609
|
||||
|| x === 0x260E
|
||||
|| x === 0x260F
|
||||
|| x === 0x261C
|
||||
|| x === 0x261E
|
||||
|| x === 0x2640
|
||||
|| x === 0x2642
|
||||
|| x === 0x2660
|
||||
|| x === 0x2661
|
||||
|| x >= 0x2663 && x <= 0x2665
|
||||
|| x >= 0x2667 && x <= 0x266A
|
||||
|| x === 0x266C
|
||||
|| x === 0x266D
|
||||
|| x === 0x266F
|
||||
|| x === 0x269E
|
||||
|| x === 0x269F
|
||||
|| x === 0x26BF
|
||||
|| x >= 0x26C6 && x <= 0x26CD
|
||||
|| x >= 0x26CF && x <= 0x26D3
|
||||
|| x >= 0x26D5 && x <= 0x26E1
|
||||
|| x === 0x26E3
|
||||
|| x === 0x26E8
|
||||
|| x === 0x26E9
|
||||
|| x >= 0x26EB && x <= 0x26F1
|
||||
|| x === 0x26F4
|
||||
|| x >= 0x26F6 && x <= 0x26F9
|
||||
|| x === 0x26FB
|
||||
|| x === 0x26FC
|
||||
|| x === 0x26FE
|
||||
|| x === 0x26FF
|
||||
|| x === 0x273D
|
||||
|| x >= 0x2776 && x <= 0x277F
|
||||
|| x >= 0x2B56 && x <= 0x2B59
|
||||
|| x >= 0x3248 && x <= 0x324F
|
||||
|| x >= 0xE000 && x <= 0xF8FF
|
||||
|| x >= 0xFE00 && x <= 0xFE0F
|
||||
|| x === 0xFFFD
|
||||
|| x >= 0x1F100 && x <= 0x1F10A
|
||||
|| x >= 0x1F110 && x <= 0x1F12D
|
||||
|| x >= 0x1F130 && x <= 0x1F169
|
||||
|| x >= 0x1F170 && x <= 0x1F18D
|
||||
|| x === 0x1F18F
|
||||
|| x === 0x1F190
|
||||
|| x >= 0x1F19B && x <= 0x1F1AC
|
||||
|| x >= 0xE0100 && x <= 0xE01EF
|
||||
|| x >= 0xF0000 && x <= 0xFFFFD
|
||||
|| x >= 0x100000 && x <= 0x10FFFD;
|
||||
}
|
||||
|
||||
function isFullWidth(x) {
|
||||
return x === 0x3000
|
||||
|| x >= 0xFF01 && x <= 0xFF60
|
||||
|| x >= 0xFFE0 && x <= 0xFFE6;
|
||||
}
|
||||
|
||||
function isWide(x) {
|
||||
return x >= 0x1100 && x <= 0x115F
|
||||
|| x === 0x231A
|
||||
|| x === 0x231B
|
||||
|| x === 0x2329
|
||||
|| x === 0x232A
|
||||
|| x >= 0x23E9 && x <= 0x23EC
|
||||
|| x === 0x23F0
|
||||
|| x === 0x23F3
|
||||
|| x === 0x25FD
|
||||
|| x === 0x25FE
|
||||
|| x === 0x2614
|
||||
|| x === 0x2615
|
||||
|| x >= 0x2630 && x <= 0x2637
|
||||
|| x >= 0x2648 && x <= 0x2653
|
||||
|| x === 0x267F
|
||||
|| x >= 0x268A && x <= 0x268F
|
||||
|| x === 0x2693
|
||||
|| x === 0x26A1
|
||||
|| x === 0x26AA
|
||||
|| x === 0x26AB
|
||||
|| x === 0x26BD
|
||||
|| x === 0x26BE
|
||||
|| x === 0x26C4
|
||||
|| x === 0x26C5
|
||||
|| x === 0x26CE
|
||||
|| x === 0x26D4
|
||||
|| x === 0x26EA
|
||||
|| x === 0x26F2
|
||||
|| x === 0x26F3
|
||||
|| x === 0x26F5
|
||||
|| x === 0x26FA
|
||||
|| x === 0x26FD
|
||||
|| x === 0x2705
|
||||
|| x === 0x270A
|
||||
|| x === 0x270B
|
||||
|| x === 0x2728
|
||||
|| x === 0x274C
|
||||
|| x === 0x274E
|
||||
|| x >= 0x2753 && x <= 0x2755
|
||||
|| x === 0x2757
|
||||
|| x >= 0x2795 && x <= 0x2797
|
||||
|| x === 0x27B0
|
||||
|| x === 0x27BF
|
||||
|| x === 0x2B1B
|
||||
|| x === 0x2B1C
|
||||
|| x === 0x2B50
|
||||
|| x === 0x2B55
|
||||
|| x >= 0x2E80 && x <= 0x2E99
|
||||
|| x >= 0x2E9B && x <= 0x2EF3
|
||||
|| x >= 0x2F00 && x <= 0x2FD5
|
||||
|| x >= 0x2FF0 && x <= 0x2FFF
|
||||
|| x >= 0x3001 && x <= 0x303E
|
||||
|| x >= 0x3041 && x <= 0x3096
|
||||
|| x >= 0x3099 && x <= 0x30FF
|
||||
|| x >= 0x3105 && x <= 0x312F
|
||||
|| x >= 0x3131 && x <= 0x318E
|
||||
|| x >= 0x3190 && x <= 0x31E5
|
||||
|| x >= 0x31EF && x <= 0x321E
|
||||
|| x >= 0x3220 && x <= 0x3247
|
||||
|| x >= 0x3250 && x <= 0xA48C
|
||||
|| x >= 0xA490 && x <= 0xA4C6
|
||||
|| x >= 0xA960 && x <= 0xA97C
|
||||
|| x >= 0xAC00 && x <= 0xD7A3
|
||||
|| x >= 0xF900 && x <= 0xFAFF
|
||||
|| x >= 0xFE10 && x <= 0xFE19
|
||||
|| x >= 0xFE30 && x <= 0xFE52
|
||||
|| x >= 0xFE54 && x <= 0xFE66
|
||||
|| x >= 0xFE68 && x <= 0xFE6B
|
||||
|| x >= 0x16FE0 && x <= 0x16FE4
|
||||
|| x >= 0x16FF0 && x <= 0x16FF6
|
||||
|| x >= 0x17000 && x <= 0x18CD5
|
||||
|| x >= 0x18CFF && x <= 0x18D1E
|
||||
|| x >= 0x18D80 && x <= 0x18DF2
|
||||
|| x >= 0x1AFF0 && x <= 0x1AFF3
|
||||
|| x >= 0x1AFF5 && x <= 0x1AFFB
|
||||
|| x === 0x1AFFD
|
||||
|| x === 0x1AFFE
|
||||
|| x >= 0x1B000 && x <= 0x1B122
|
||||
|| x === 0x1B132
|
||||
|| x >= 0x1B150 && x <= 0x1B152
|
||||
|| x === 0x1B155
|
||||
|| x >= 0x1B164 && x <= 0x1B167
|
||||
|| x >= 0x1B170 && x <= 0x1B2FB
|
||||
|| x >= 0x1D300 && x <= 0x1D356
|
||||
|| x >= 0x1D360 && x <= 0x1D376
|
||||
|| x === 0x1F004
|
||||
|| x === 0x1F0CF
|
||||
|| x === 0x1F18E
|
||||
|| x >= 0x1F191 && x <= 0x1F19A
|
||||
|| x >= 0x1F200 && x <= 0x1F202
|
||||
|| x >= 0x1F210 && x <= 0x1F23B
|
||||
|| x >= 0x1F240 && x <= 0x1F248
|
||||
|| x === 0x1F250
|
||||
|| x === 0x1F251
|
||||
|| x >= 0x1F260 && x <= 0x1F265
|
||||
|| x >= 0x1F300 && x <= 0x1F320
|
||||
|| x >= 0x1F32D && x <= 0x1F335
|
||||
|| x >= 0x1F337 && x <= 0x1F37C
|
||||
|| x >= 0x1F37E && x <= 0x1F393
|
||||
|| x >= 0x1F3A0 && x <= 0x1F3CA
|
||||
|| x >= 0x1F3CF && x <= 0x1F3D3
|
||||
|| x >= 0x1F3E0 && x <= 0x1F3F0
|
||||
|| x === 0x1F3F4
|
||||
|| x >= 0x1F3F8 && x <= 0x1F43E
|
||||
|| x === 0x1F440
|
||||
|| x >= 0x1F442 && x <= 0x1F4FC
|
||||
|| x >= 0x1F4FF && x <= 0x1F53D
|
||||
|| x >= 0x1F54B && x <= 0x1F54E
|
||||
|| x >= 0x1F550 && x <= 0x1F567
|
||||
|| x === 0x1F57A
|
||||
|| x === 0x1F595
|
||||
|| x === 0x1F596
|
||||
|| x === 0x1F5A4
|
||||
|| x >= 0x1F5FB && x <= 0x1F64F
|
||||
|| x >= 0x1F680 && x <= 0x1F6C5
|
||||
|| x === 0x1F6CC
|
||||
|| x >= 0x1F6D0 && x <= 0x1F6D2
|
||||
|| x >= 0x1F6D5 && x <= 0x1F6D8
|
||||
|| x >= 0x1F6DC && x <= 0x1F6DF
|
||||
|| x === 0x1F6EB
|
||||
|| x === 0x1F6EC
|
||||
|| x >= 0x1F6F4 && x <= 0x1F6FC
|
||||
|| x >= 0x1F7E0 && x <= 0x1F7EB
|
||||
|| x === 0x1F7F0
|
||||
|| x >= 0x1F90C && x <= 0x1F93A
|
||||
|| x >= 0x1F93C && x <= 0x1F945
|
||||
|| x >= 0x1F947 && x <= 0x1F9FF
|
||||
|| x >= 0x1FA70 && x <= 0x1FA7C
|
||||
|| x >= 0x1FA80 && x <= 0x1FA8A
|
||||
|| x >= 0x1FA8E && x <= 0x1FAC6
|
||||
|| x === 0x1FAC8
|
||||
|| x >= 0x1FACD && x <= 0x1FADC
|
||||
|| x >= 0x1FADF && x <= 0x1FAEA
|
||||
|| x >= 0x1FAEF && x <= 0x1FAF8
|
||||
|| x >= 0x20000 && x <= 0x2FFFD
|
||||
|| x >= 0x30000 && x <= 0x3FFFD;
|
||||
}
|
||||
|
||||
function getCategory(x) {
|
||||
if (isAmbiguous(x)) return 'ambiguous';
|
||||
|
||||
if (isFullWidth(x)) return 'fullwidth';
|
||||
|
||||
if (
|
||||
x === 0x20A9
|
||||
|| x >= 0xFF61 && x <= 0xFFBE
|
||||
|| x >= 0xFFC2 && x <= 0xFFC7
|
||||
|| x >= 0xFFCA && x <= 0xFFCF
|
||||
|| x >= 0xFFD2 && x <= 0xFFD7
|
||||
|| x >= 0xFFDA && x <= 0xFFDC
|
||||
|| x >= 0xFFE8 && x <= 0xFFEE
|
||||
) {
|
||||
return 'halfwidth';
|
||||
}
|
||||
|
||||
if (
|
||||
x >= 0x20 && x <= 0x7E
|
||||
|| x === 0xA2
|
||||
|| x === 0xA3
|
||||
|| x === 0xA5
|
||||
|| x === 0xA6
|
||||
|| x === 0xAC
|
||||
|| x === 0xAF
|
||||
|| x >= 0x27E6 && x <= 0x27ED
|
||||
|| x === 0x2985
|
||||
|| x === 0x2986
|
||||
) {
|
||||
return 'narrow';
|
||||
}
|
||||
|
||||
if (isWide(x)) return 'wide';
|
||||
|
||||
return 'neutral';
|
||||
}
|
||||
|
||||
export {isAmbiguous, isFullWidth, isWide, getCategory};
|
||||
70
Jira_helper/node_modules/get-east-asian-width/package.json
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"name": "get-east-asian-width",
|
||||
"version": "1.4.0",
|
||||
"description": "Determine the East Asian Width of a Unicode character",
|
||||
"license": "MIT",
|
||||
"repository": "sindresorhus/get-east-asian-width",
|
||||
"funding": "https://github.com/sponsors/sindresorhus",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": {
|
||||
"types": "./index.d.ts",
|
||||
"default": "./index.js"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && ava && tsc index.d.ts",
|
||||
"build": "node scripts/build.js",
|
||||
"prepublish": "npm run build"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts",
|
||||
"lookup.js"
|
||||
],
|
||||
"keywords": [
|
||||
"unicode",
|
||||
"east-asian-width",
|
||||
"eastasianwidth",
|
||||
"character",
|
||||
"string",
|
||||
"width",
|
||||
"text",
|
||||
"layout",
|
||||
"alignment",
|
||||
"fullwidth",
|
||||
"halfwidth",
|
||||
"ambiguous",
|
||||
"narrow",
|
||||
"wide",
|
||||
"neutral",
|
||||
"typography",
|
||||
"japanese",
|
||||
"chinese",
|
||||
"korean",
|
||||
"codepoint",
|
||||
"text-processing",
|
||||
"i18n",
|
||||
"l10n"
|
||||
],
|
||||
"devDependencies": {
|
||||
"ava": "^5.3.1",
|
||||
"indent-string": "^5.0.0",
|
||||
"outdent": "^0.8.0",
|
||||
"simplify-ranges": "^0.1.0",
|
||||
"typescript": "^5.2.2",
|
||||
"xo": "^0.56.0"
|
||||
},
|
||||
"xo": {
|
||||
"ignores": [
|
||||
"lookup.js"
|
||||
]
|
||||
}
|
||||
}
|
||||
65
Jira_helper/node_modules/get-east-asian-width/readme.md
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
# get-east-asian-width
|
||||
|
||||
> Determine the [East Asian Width](https://unicode.org/reports/tr11/) of a Unicode character
|
||||
|
||||
> East Asian Width categorizes Unicode characters based on their occupied space in East Asian typography, which helps in text layout and alignment, particularly in languages like Japanese, Chinese, and Korean.
|
||||
|
||||
Unlike other similar packages, this package uses the latest Unicode data (which changes each year).
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install get-east-asian-width
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import {eastAsianWidth, eastAsianWidthType} from 'get-east-asian-width';
|
||||
|
||||
const codePoint = '字'.codePointAt(0);
|
||||
|
||||
console.log(eastAsianWidth(codePoint));
|
||||
//=> 2
|
||||
|
||||
console.log(eastAsianWidthType(codePoint));
|
||||
//=> 'wide'
|
||||
```
|
||||
|
||||
## `eastAsianWidth(codePoint: number, options?: object): 1 | 2`
|
||||
|
||||
Returns the width as a number for the given code point.
|
||||
|
||||
### options
|
||||
|
||||
Type: `object`
|
||||
|
||||
#### ambiguousAsWide
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `false`
|
||||
|
||||
Whether to treat an `'ambiguous'` character as wide.
|
||||
|
||||
```js
|
||||
import {eastAsianWidth} from 'get-east-asian-width';
|
||||
|
||||
const codePoint = '⛣'.codePointAt(0);
|
||||
|
||||
console.log(eastAsianWidth(codePoint));
|
||||
//=> 1
|
||||
|
||||
console.log(eastAsianWidth(codePoint, {ambiguousAsWide: true}));
|
||||
//=> 2
|
||||
```
|
||||
|
||||
> Ambiguous characters behave like wide or narrow characters depending on the context (language tag, script identification, associated font, source of data, or explicit markup; all can provide the context). **If the context cannot be established reliably, they should be treated as narrow characters by default.**
|
||||
> - http://www.unicode.org/reports/tr11/
|
||||
|
||||
## `eastAsianWidthType(codePoint: number): 'fullwidth' | 'halfwidth' | 'wide' | 'narrow' | 'neutral' | 'ambiguous'`
|
||||
|
||||
Returns the type of “East Asian Width” for the given code point.
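A small example of the type lookup for a halfwidth code point (the result follows from the ranges in `lookup.js` above; the sample character is just an illustration):

```js
import {eastAsianWidthType} from 'get-east-asian-width';

// U+FF71 HALFWIDTH KATAKANA LETTER A lies in the 0xFF61–0xFFBE halfwidth range.
console.log(eastAsianWidthType('ｱ'.codePointAt(0)));
//=> 'halfwidth'
```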
|
||||
|
||||
## Related
|
||||
|
||||
- [string-width](https://github.com/sindresorhus/string-width) - Get the visual width of a string
|
||||
39
Jira_helper/node_modules/string-width/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
export type Options = {
|
||||
/**
|
||||
Count [ambiguous width characters](https://www.unicode.org/reports/tr11/#Ambiguous) as having narrow width (count of 1) instead of wide width (count of 2).
|
||||
|
||||
@default true
|
||||
|
||||
> Ambiguous characters behave like wide or narrow characters depending on the context (language tag, script identification, associated font, source of data, or explicit markup; all can provide the context). __If the context cannot be established reliably, they should be treated as narrow characters by default.__
|
||||
> - http://www.unicode.org/reports/tr11/
|
||||
*/
|
||||
readonly ambiguousIsNarrow?: boolean;
|
||||
|
||||
/**
|
||||
Whether [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) should be counted.
|
||||
|
||||
@default false
|
||||
*/
|
||||
readonly countAnsiEscapeCodes?: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
Get the visual width of a string - the number of columns required to display it.
|
||||
|
||||
Some Unicode characters are [fullwidth](https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms) and use double the normal width. [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) are stripped and don't affect the width.
|
||||
|
||||
@example
|
||||
```
|
||||
import stringWidth from 'string-width';
|
||||
|
||||
stringWidth('a');
|
||||
//=> 1
|
||||
|
||||
stringWidth('古');
|
||||
//=> 2
|
||||
|
||||
stringWidth('\u001B[1m古\u001B[22m');
|
||||
//=> 2
|
||||
```
|
||||
*/
|
||||
export default function stringWidth(string: string, options?: Options): number;
|
||||
82
Jira_helper/node_modules/string-width/index.js
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
import stripAnsi from 'strip-ansi';
|
||||
import {eastAsianWidth} from 'get-east-asian-width';
|
||||
import emojiRegex from 'emoji-regex';
|
||||
|
||||
const segmenter = new Intl.Segmenter();
|
||||
|
||||
const defaultIgnorableCodePointRegex = /^\p{Default_Ignorable_Code_Point}$/u;
|
||||
|
||||
export default function stringWidth(string, options = {}) {
|
||||
if (typeof string !== 'string' || string.length === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const {
|
||||
ambiguousIsNarrow = true,
|
||||
countAnsiEscapeCodes = false,
|
||||
} = options;
|
||||
|
||||
if (!countAnsiEscapeCodes) {
|
||||
string = stripAnsi(string);
|
||||
}
|
||||
|
||||
if (string.length === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let width = 0;
|
||||
const eastAsianWidthOptions = {ambiguousAsWide: !ambiguousIsNarrow};
|
||||
|
||||
for (const {segment: character} of segmenter.segment(string)) {
|
||||
const codePoint = character.codePointAt(0);
|
||||
|
||||
// Ignore control characters
|
||||
if (codePoint <= 0x1F || (codePoint >= 0x7F && codePoint <= 0x9F)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore zero-width characters
|
||||
if (
|
||||
(codePoint >= 0x20_0B && codePoint <= 0x20_0F) // Zero-width space, non-joiner, joiner, left-to-right mark, right-to-left mark
|
||||
|| codePoint === 0xFE_FF // Zero-width no-break space
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore combining characters
|
||||
if (
|
||||
(codePoint >= 0x3_00 && codePoint <= 0x3_6F) // Combining diacritical marks
|
||||
|| (codePoint >= 0x1A_B0 && codePoint <= 0x1A_FF) // Combining diacritical marks extended
|
||||
|| (codePoint >= 0x1D_C0 && codePoint <= 0x1D_FF) // Combining diacritical marks supplement
|
||||
|| (codePoint >= 0x20_D0 && codePoint <= 0x20_FF) // Combining diacritical marks for symbols
|
||||
|| (codePoint >= 0xFE_20 && codePoint <= 0xFE_2F) // Combining half marks
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore surrogate pairs
|
||||
if (codePoint >= 0xD8_00 && codePoint <= 0xDF_FF) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Ignore variation selectors
|
||||
if (codePoint >= 0xFE_00 && codePoint <= 0xFE_0F) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// This covers some of the above cases, but we still keep them for performance reasons.
|
||||
if (defaultIgnorableCodePointRegex.test(character)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: Use `/\p{RGI_Emoji}/v` when targeting Node.js 20.
|
||||
if (emojiRegex().test(character)) {
|
||||
width += 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
width += eastAsianWidth(codePoint, eastAsianWidthOptions);
|
||||
}
|
||||
|
||||
return width;
|
||||
}
|
||||
9
Jira_helper/node_modules/string-width/license
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
64
Jira_helper/node_modules/string-width/package.json
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"name": "string-width",
|
||||
"version": "7.2.0",
|
||||
"description": "Get the visual width of a string - the number of columns required to display it",
|
||||
"license": "MIT",
|
||||
"repository": "sindresorhus/string-width",
|
||||
"funding": "https://github.com/sponsors/sindresorhus",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": {
|
||||
"types": "./index.d.ts",
|
||||
"default": "./index.js"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && ava && tsd"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"keywords": [
|
||||
"string",
|
||||
"character",
|
||||
"unicode",
|
||||
"width",
|
||||
"visual",
|
||||
"column",
|
||||
"columns",
|
||||
"fullwidth",
|
||||
"full-width",
|
||||
"full",
|
||||
"ansi",
|
||||
"escape",
|
||||
"codes",
|
||||
"cli",
|
||||
"command-line",
|
||||
"terminal",
|
||||
"console",
|
||||
"cjk",
|
||||
"chinese",
|
||||
"japanese",
|
||||
"korean",
|
||||
"fixed-width",
|
||||
"east-asian-width"
|
||||
],
|
||||
"dependencies": {
|
||||
"emoji-regex": "^10.3.0",
|
||||
"get-east-asian-width": "^1.0.0",
|
||||
"strip-ansi": "^7.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"ava": "^5.3.1",
|
||||
"tsd": "^0.29.0",
|
||||
"xo": "^0.56.0"
|
||||
}
|
||||
}
|
||||
66
Jira_helper/node_modules/string-width/readme.md
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
# string-width
|
||||
|
||||
> Get the visual width of a string - the number of columns required to display it
|
||||
|
||||
Some Unicode characters are [fullwidth](https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms) and use double the normal width. [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) are stripped and don't affect the width.
|
||||
|
||||
Useful to be able to measure the actual width of command-line output.
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install string-width
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import stringWidth from 'string-width';
|
||||
|
||||
stringWidth('a');
|
||||
//=> 1
|
||||
|
||||
stringWidth('古');
|
||||
//=> 2
|
||||
|
||||
stringWidth('\u001B[1m古\u001B[22m');
|
||||
//=> 2
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
### stringWidth(string, options?)
|
||||
|
||||
#### string
|
||||
|
||||
Type: `string`
|
||||
|
||||
The string to be counted.
|
||||
|
||||
#### options
|
||||
|
||||
Type: `object`
|
||||
|
||||
##### ambiguousIsNarrow
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `true`
|
||||
|
||||
Count [ambiguous width characters](https://www.unicode.org/reports/tr11/#Ambiguous) as having narrow width (count of 1) instead of wide width (count of 2).
|
||||
|
||||
> Ambiguous characters behave like wide or narrow characters depending on the context (language tag, script identification, associated font, source of data, or explicit markup; all can provide the context). **If the context cannot be established reliably, they should be treated as narrow characters by default.**
|
||||
> - http://www.unicode.org/reports/tr11/
|
||||
|
||||
##### countAnsiEscapeCodes
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `false`
|
||||
|
||||
Whether [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) should be counted.
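A short sketch of `ambiguousIsNarrow` (U+00A7 is one of the ambiguous-width code points in `get-east-asian-width`'s lookup table):

```js
import stringWidth from 'string-width';

stringWidth('§');
//=> 1

stringWidth('§', {ambiguousIsNarrow: false});
//=> 2
```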
|
||||
|
||||
## Related
|
||||
|
||||
- [string-width-cli](https://github.com/sindresorhus/string-width-cli) - CLI for this module
|
||||
- [string-length](https://github.com/sindresorhus/string-length) - Get the real length of a string
|
||||
- [widest-line](https://github.com/sindresorhus/widest-line) - Get the visual width of the widest line in a string
|
||||
- [get-east-asian-width](https://github.com/sindresorhus/get-east-asian-width) - Determine the East Asian Width of a Unicode character
|
||||
15
Jira_helper/node_modules/strip-ansi/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
/**
|
||||
Strip [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) from a string.
|
||||
|
||||
@example
|
||||
```
|
||||
import stripAnsi from 'strip-ansi';
|
||||
|
||||
stripAnsi('\u001B[4mUnicorn\u001B[0m');
|
||||
//=> 'Unicorn'
|
||||
|
||||
stripAnsi('\u001B]8;;https://github.com\u0007Click\u001B]8;;\u0007');
|
||||
//=> 'Click'
|
||||
```
|
||||
*/
|
||||
export default function stripAnsi(string: string): string;
|
||||
14
Jira_helper/node_modules/strip-ansi/index.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import ansiRegex from 'ansi-regex';
|
||||
|
||||
const regex = ansiRegex();
|
||||
|
||||
export default function stripAnsi(string) {
|
||||
if (typeof string !== 'string') {
|
||||
throw new TypeError(`Expected a \`string\`, got \`${typeof string}\``);
|
||||
}
|
||||
|
||||
// Even though the regex is global, we don't need to reset the `.lastIndex`
|
||||
// because unlike `.exec()` and `.test()`, `.replace()` does it automatically
|
||||
// and doing it manually has a performance penalty.
|
||||
return string.replace(regex, '');
|
||||
}
|
||||
9
Jira_helper/node_modules/strip-ansi/license
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
59
Jira_helper/node_modules/strip-ansi/package.json
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
{
|
||||
"name": "strip-ansi",
|
||||
"version": "7.1.2",
|
||||
"description": "Strip ANSI escape codes from a string",
|
||||
"license": "MIT",
|
||||
"repository": "chalk/strip-ansi",
|
||||
"funding": "https://github.com/chalk/strip-ansi?sponsor=1",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": "./index.js",
|
||||
"types": "./index.d.ts",
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && ava && tsd"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"keywords": [
|
||||
"strip",
|
||||
"trim",
|
||||
"remove",
|
||||
"ansi",
|
||||
"styles",
|
||||
"color",
|
||||
"colour",
|
||||
"colors",
|
||||
"terminal",
|
||||
"console",
|
||||
"string",
|
||||
"tty",
|
||||
"escape",
|
||||
"formatting",
|
||||
"rgb",
|
||||
"256",
|
||||
"shell",
|
||||
"xterm",
|
||||
"log",
|
||||
"logging",
|
||||
"command-line",
|
||||
"text"
|
||||
],
|
||||
"dependencies": {
|
||||
"ansi-regex": "^6.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"ava": "^3.15.0",
|
||||
"tsd": "^0.17.0",
|
||||
"xo": "^0.44.0"
|
||||
}
|
||||
}
|
||||
37
Jira_helper/node_modules/strip-ansi/readme.md
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# strip-ansi
|
||||
|
||||
> Strip [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) from a string
|
||||
|
||||
> [!NOTE]
|
||||
> Node.js has this built-in now with [`stripVTControlCharacters`](https://nodejs.org/api/util.html#utilstripvtcontrolcharactersstr). The benefit of this package is consistent behavior across Node.js versions and faster improvements. The Node.js version is actually based on this package.
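For comparison, a minimal sketch of the built-in alternative mentioned above:

```js
import {stripVTControlCharacters} from 'node:util';

stripVTControlCharacters('\u001B[4mUnicorn\u001B[0m');
//=> 'Unicorn'
```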
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install strip-ansi
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import stripAnsi from 'strip-ansi';
|
||||
|
||||
stripAnsi('\u001B[4mUnicorn\u001B[0m');
|
||||
//=> 'Unicorn'
|
||||
|
||||
stripAnsi('\u001B]8;;https://github.com\u0007Click\u001B]8;;\u0007');
|
||||
//=> 'Click'
|
||||
```
|
||||
|
||||
## Related
|
||||
|
||||
- [strip-ansi-cli](https://github.com/chalk/strip-ansi-cli) - CLI for this module
|
||||
- [strip-ansi-stream](https://github.com/chalk/strip-ansi-stream) - Streaming version of this module
|
||||
- [has-ansi](https://github.com/chalk/has-ansi) - Check if a string has ANSI escape codes
|
||||
- [ansi-regex](https://github.com/chalk/ansi-regex) - Regular expression for matching ANSI escape codes
|
||||
- [chalk](https://github.com/chalk/chalk) - Terminal string styling done right
|
||||
|
||||
## Maintainers
|
||||
|
||||
- [Sindre Sorhus](https://github.com/sindresorhus)
|
||||
- [Josh Junon](https://github.com/qix-)
|
||||
41
Jira_helper/node_modules/wrap-ansi/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
export type Options = {
|
||||
/**
|
||||
By default the wrap is soft, meaning long words may extend past the column width. Setting this to `true` will make it hard wrap at the column width.
|
||||
|
||||
@default false
|
||||
*/
|
||||
readonly hard?: boolean;
|
||||
|
||||
/**
|
||||
By default, an attempt is made to split words at spaces, ensuring that they don't extend past the configured columns. If `wordWrap` is `false`, each column will instead be completely filled, splitting words as necessary.
|
||||
|
||||
@default true
|
||||
*/
|
||||
readonly wordWrap?: boolean;
|
||||
|
||||
/**
|
||||
Whitespace on all lines is removed by default. Set this option to `false` if you don't want to trim.
|
||||
|
||||
@default true
|
||||
*/
|
||||
readonly trim?: boolean;
|
||||
};
|
||||
|
||||
/**
|
||||
Wrap words to the specified column width.
|
||||
|
||||
@param string - A string with ANSI escape codes, like one styled by [`chalk`](https://github.com/chalk/chalk). Newline characters will be normalized to `\n`.
|
||||
@param columns - The number of columns to wrap the text to.
|
||||
|
||||
@example
|
||||
```
|
||||
import chalk from 'chalk';
|
||||
import wrapAnsi from 'wrap-ansi';
|
||||
|
||||
const input = 'The quick brown ' + chalk.red('fox jumped over ') +
|
||||
'the lazy ' + chalk.green('dog and then ran away with the unicorn.');
|
||||
|
||||
console.log(wrapAnsi(input, 20));
|
||||
```
|
||||
*/
|
||||
export default function wrapAnsi(string: string, columns: number, options?: Options): string;
|
||||
222
Jira_helper/node_modules/wrap-ansi/index.js
generated
vendored
Executable file
@@ -0,0 +1,222 @@
|
||||
import stringWidth from 'string-width';
|
||||
import stripAnsi from 'strip-ansi';
|
||||
import ansiStyles from 'ansi-styles';
|
||||
|
||||
const ESCAPES = new Set([
|
||||
'\u001B',
|
||||
'\u009B',
|
||||
]);
|
||||
|
||||
const END_CODE = 39;
|
||||
const ANSI_ESCAPE_BELL = '\u0007';
|
||||
const ANSI_CSI = '[';
|
||||
const ANSI_OSC = ']';
|
||||
const ANSI_SGR_TERMINATOR = 'm';
|
||||
const ANSI_ESCAPE_LINK = `${ANSI_OSC}8;;`;
|
||||
|
||||
const wrapAnsiCode = code => `${ESCAPES.values().next().value}${ANSI_CSI}${code}${ANSI_SGR_TERMINATOR}`;
|
||||
const wrapAnsiHyperlink = url => `${ESCAPES.values().next().value}${ANSI_ESCAPE_LINK}${url}${ANSI_ESCAPE_BELL}`;
|
||||
|
||||
// Calculate the length of words split on ' ', ignoring
|
||||
// the extra characters added by ansi escape codes
|
||||
const wordLengths = string => string.split(' ').map(character => stringWidth(character));
|
||||
|
||||
// Wrap a long word across multiple rows
|
||||
// Ansi escape codes do not count towards length
|
||||
const wrapWord = (rows, word, columns) => {
|
||||
const characters = [...word];
|
||||
|
||||
let isInsideEscape = false;
|
||||
let isInsideLinkEscape = false;
|
||||
let visible = stringWidth(stripAnsi(rows.at(-1)));
|
||||
|
||||
for (const [index, character] of characters.entries()) {
|
||||
const characterLength = stringWidth(character);
|
||||
|
||||
if (visible + characterLength <= columns) {
|
||||
rows[rows.length - 1] += character;
|
||||
} else {
|
||||
rows.push(character);
|
||||
visible = 0;
|
||||
}
|
||||
|
||||
if (ESCAPES.has(character)) {
|
||||
isInsideEscape = true;
|
||||
|
||||
const ansiEscapeLinkCandidate = characters.slice(index + 1, index + 1 + ANSI_ESCAPE_LINK.length).join('');
|
||||
isInsideLinkEscape = ansiEscapeLinkCandidate === ANSI_ESCAPE_LINK;
|
||||
}
|
||||
|
||||
if (isInsideEscape) {
|
||||
if (isInsideLinkEscape) {
|
||||
if (character === ANSI_ESCAPE_BELL) {
|
||||
isInsideEscape = false;
|
||||
isInsideLinkEscape = false;
|
||||
}
|
||||
} else if (character === ANSI_SGR_TERMINATOR) {
|
||||
isInsideEscape = false;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
visible += characterLength;
|
||||
|
||||
if (visible === columns && index < characters.length - 1) {
|
||||
rows.push('');
|
||||
visible = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// It's possible that the last row we copy over is only
|
||||
// ansi escape characters, handle this edge-case
|
||||
if (!visible && rows.at(-1).length > 0 && rows.length > 1) {
|
||||
rows[rows.length - 2] += rows.pop();
|
||||
}
|
||||
};
|
||||
|
||||
// Trims spaces from a string ignoring invisible sequences
|
||||
const stringVisibleTrimSpacesRight = string => {
|
||||
const words = string.split(' ');
|
||||
let last = words.length;
|
||||
|
||||
while (last > 0) {
|
||||
if (stringWidth(words[last - 1]) > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
last--;
|
||||
}
|
||||
|
||||
if (last === words.length) {
|
||||
return string;
|
||||
}
|
||||
|
||||
return words.slice(0, last).join(' ') + words.slice(last).join('');
|
||||
};
|
||||
|
||||
// The wrap-ansi module can be invoked in either 'hard' or 'soft' wrap mode.
|
||||
//
|
||||
// 'hard' will never allow a string to take up more than columns characters.
|
||||
//
|
||||
// 'soft' allows long words to expand past the column length.
|
||||
const exec = (string, columns, options = {}) => {
|
||||
if (options.trim !== false && string.trim() === '') {
|
||||
return '';
|
||||
}
|
||||
|
||||
let returnValue = '';
|
||||
let escapeCode;
|
||||
let escapeUrl;
|
||||
|
||||
const lengths = wordLengths(string);
|
||||
let rows = [''];
|
||||
|
||||
for (const [index, word] of string.split(' ').entries()) {
|
||||
if (options.trim !== false) {
|
||||
rows[rows.length - 1] = rows.at(-1).trimStart();
|
||||
}
|
||||
|
||||
let rowLength = stringWidth(rows.at(-1));
|
||||
|
||||
if (index !== 0) {
|
||||
if (rowLength >= columns && (options.wordWrap === false || options.trim === false)) {
|
||||
// If we start with a new word but the current row length equals the length of the columns, add a new row
|
||||
rows.push('');
|
||||
rowLength = 0;
|
||||
}
|
||||
|
||||
if (rowLength > 0 || options.trim === false) {
|
||||
rows[rows.length - 1] += ' ';
|
||||
rowLength++;
|
||||
}
|
||||
}
|
||||
|
||||
// In 'hard' wrap mode, the length of a line is never allowed to extend past 'columns'
|
||||
if (options.hard && lengths[index] > columns) {
|
||||
const remainingColumns = (columns - rowLength);
|
||||
const breaksStartingThisLine = 1 + Math.floor((lengths[index] - remainingColumns - 1) / columns);
|
||||
const breaksStartingNextLine = Math.floor((lengths[index] - 1) / columns);
|
||||
if (breaksStartingNextLine < breaksStartingThisLine) {
|
||||
rows.push('');
|
||||
}
|
||||
|
||||
wrapWord(rows, word, columns);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (rowLength + lengths[index] > columns && rowLength > 0 && lengths[index] > 0) {
|
||||
if (options.wordWrap === false && rowLength < columns) {
|
||||
wrapWord(rows, word, columns);
|
||||
continue;
|
||||
}
|
||||
|
||||
rows.push('');
|
||||
}
|
||||
|
||||
if (rowLength + lengths[index] > columns && options.wordWrap === false) {
|
||||
wrapWord(rows, word, columns);
|
||||
continue;
|
||||
}
|
||||
|
||||
rows[rows.length - 1] += word;
|
||||
}
|
||||
|
||||
if (options.trim !== false) {
|
||||
rows = rows.map(row => stringVisibleTrimSpacesRight(row));
|
||||
}
|
||||
|
||||
const preString = rows.join('\n');
|
||||
const pre = [...preString];
|
||||
|
||||
// We need to keep a separate index as `String#slice()` works on Unicode code units, while `pre` is an array of codepoints.
|
||||
let preStringIndex = 0;
|
||||
|
||||
for (const [index, character] of pre.entries()) {
|
||||
returnValue += character;
|
||||
|
||||
if (ESCAPES.has(character)) {
|
||||
const {groups} = new RegExp(`(?:\\${ANSI_CSI}(?<code>\\d+)m|\\${ANSI_ESCAPE_LINK}(?<uri>.*)${ANSI_ESCAPE_BELL})`).exec(preString.slice(preStringIndex)) || {groups: {}};
|
||||
if (groups.code !== undefined) {
|
||||
const code = Number.parseFloat(groups.code);
|
||||
escapeCode = code === END_CODE ? undefined : code;
|
||||
} else if (groups.uri !== undefined) {
|
||||
escapeUrl = groups.uri.length === 0 ? undefined : groups.uri;
|
||||
}
|
||||
}
|
||||
|
||||
const code = ansiStyles.codes.get(Number(escapeCode));
|
||||
|
||||
if (pre[index + 1] === '\n') {
|
||||
if (escapeUrl) {
|
||||
returnValue += wrapAnsiHyperlink('');
|
||||
}
|
||||
|
||||
if (escapeCode && code) {
|
||||
returnValue += wrapAnsiCode(code);
|
||||
}
|
||||
} else if (character === '\n') {
|
||||
if (escapeCode && code) {
|
||||
returnValue += wrapAnsiCode(escapeCode);
|
||||
}
|
||||
|
||||
if (escapeUrl) {
|
||||
returnValue += wrapAnsiHyperlink(escapeUrl);
|
||||
}
|
||||
}
|
||||
|
||||
preStringIndex += character.length;
|
||||
}
|
||||
|
||||
return returnValue;
|
||||
};
|
||||
|
||||
// For each newline, invoke the method separately
|
||||
export default function wrapAnsi(string, columns, options) {
|
||||
return String(string)
|
||||
.normalize()
|
||||
.replaceAll('\r\n', '\n')
|
||||
.split('\n')
|
||||
.map(line => exec(line, columns, options))
|
||||
.join('\n');
|
||||
}
|
||||
9
Jira_helper/node_modules/wrap-ansi/license
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
69
Jira_helper/node_modules/wrap-ansi/package.json
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
"name": "wrap-ansi",
|
||||
"version": "9.0.2",
|
||||
"description": "Wordwrap a string with ANSI escape codes",
|
||||
"license": "MIT",
|
||||
"repository": "chalk/wrap-ansi",
|
||||
"funding": "https://github.com/chalk/wrap-ansi?sponsor=1",
|
||||
"author": {
|
||||
"name": "Sindre Sorhus",
|
||||
"email": "sindresorhus@gmail.com",
|
||||
"url": "https://sindresorhus.com"
|
||||
},
|
||||
"type": "module",
|
||||
"exports": {
|
||||
"types": "./index.d.ts",
|
||||
"default": "./index.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "xo && nyc ava && tsd"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"index.d.ts"
|
||||
],
|
||||
"keywords": [
|
||||
"wrap",
|
||||
"break",
|
||||
"wordwrap",
|
||||
"wordbreak",
|
||||
"linewrap",
|
||||
"ansi",
|
||||
"styles",
|
||||
"color",
|
||||
"colour",
|
||||
"colors",
|
||||
"terminal",
|
||||
"console",
|
||||
"cli",
|
||||
"string",
|
||||
"tty",
|
||||
"escape",
|
||||
"formatting",
|
||||
"rgb",
|
||||
"256",
|
||||
"shell",
|
||||
"xterm",
|
||||
"log",
|
||||
"logging",
|
||||
"command-line",
|
||||
"text"
|
||||
],
|
||||
"dependencies": {
|
||||
"ansi-styles": "^6.2.1",
|
||||
"string-width": "^7.0.0",
|
||||
"strip-ansi": "^7.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"ava": "^5.3.1",
|
||||
"chalk": "^5.3.0",
|
||||
"coveralls": "^3.1.1",
|
||||
"has-ansi": "^5.0.1",
|
||||
"nyc": "^15.1.0",
|
||||
"tsd": "^0.29.0",
|
||||
"xo": "^0.56.0"
|
||||
}
|
||||
}
|
||||
75
Jira_helper/node_modules/wrap-ansi/readme.md
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
# wrap-ansi
|
||||
|
||||
> Wordwrap a string with [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code#Colors_and_Styles)
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
npm install wrap-ansi
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import chalk from 'chalk';
|
||||
import wrapAnsi from 'wrap-ansi';
|
||||
|
||||
const input = 'The quick brown ' + chalk.red('fox jumped over ') +
|
||||
'the lazy ' + chalk.green('dog and then ran away with the unicorn.');
|
||||
|
||||
console.log(wrapAnsi(input, 20));
|
||||
```
|
||||
|
||||
<img width="331" src="screenshot.png">
|
||||
|
||||
## API
|
||||
|
||||
### wrapAnsi(string, columns, options?)
|
||||
|
||||
Wrap words to the specified column width.
|
||||
|
||||
#### string
|
||||
|
||||
Type: `string`
|
||||
|
||||
A string with ANSI escape codes, like one styled by [`chalk`](https://github.com/chalk/chalk).
|
||||
|
||||
Newline characters will be normalized to `\n`.
|
||||
|
||||
#### columns
|
||||
|
||||
Type: `number`
|
||||
|
||||
The number of columns to wrap the text to.
|
||||
|
||||
#### options
|
||||
|
||||
Type: `object`
|
||||
|
||||
##### hard
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `false`
|
||||
|
||||
By default the wrap is soft, meaning long words may extend past the column width. Setting this to `true` will make it hard wrap at the column width.
|
||||
|
||||
##### wordWrap
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `true`
|
||||
|
||||
By default, an attempt is made to split words at spaces, ensuring that they don't extend past the configured columns. If `wordWrap` is `false`, each column will instead be completely filled, splitting words as necessary.
|
||||
|
||||
##### trim
|
||||
|
||||
Type: `boolean`\
|
||||
Default: `true`
|
||||
|
||||
Whitespace on all lines is removed by default. Set this option to `false` if you don't want to trim.
|
||||
|
||||
## Related
|
||||
|
||||
- [slice-ansi](https://github.com/chalk/slice-ansi) - Slice a string with ANSI escape codes
|
||||
- [cli-truncate](https://github.com/sindresorhus/cli-truncate) - Truncate a string to a specific width in the terminal
|
||||
- [chalk](https://github.com/chalk/chalk) - Terminal string styling done right
|
||||
- [jsesc](https://github.com/mathiasbynens/jsesc) - Generate ASCII-only output from Unicode strings. Useful for creating test fixtures.
|
||||
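A short sketch (not part of the vendored readme) contrasting the options documented above; the exact output depends on terminal width handling, so it is indicative only:

```js
import wrapAnsi from 'wrap-ansi';

const text = 'averyveryverylongword plus some ordinary words';

// Soft wrap (default): the long word may overflow the 10-column limit.
console.log(wrapAnsi(text, 10));

// hard: true — the long word is broken exactly at the column width.
console.log(wrapAnsi(text, 10, {hard: true}));

// trim: false — leading/trailing whitespace on wrapped lines is preserved.
console.log(wrapAnsi('  indented text that wraps  ', 12, {trim: false}));
```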
100
Jira_helper/node_modules/y18n/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,100 @@
# Change Log

All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.

### [5.0.8](https://www.github.com/yargs/y18n/compare/v5.0.7...v5.0.8) (2021-04-07)


### Bug Fixes

* **deno:** force modern release for Deno ([b1c215a](https://www.github.com/yargs/y18n/commit/b1c215aed714bee5830e76de3e335504dc2c4dab))

### [5.0.7](https://www.github.com/yargs/y18n/compare/v5.0.6...v5.0.7) (2021-04-07)


### Bug Fixes

* **deno:** force release for deno ([#121](https://www.github.com/yargs/y18n/issues/121)) ([d3f2560](https://www.github.com/yargs/y18n/commit/d3f2560e6cedf2bfa2352e9eec044da53f9a06b2))

### [5.0.6](https://www.github.com/yargs/y18n/compare/v5.0.5...v5.0.6) (2021-04-05)


### Bug Fixes

* **webpack:** skip readFileSync if not defined ([#117](https://www.github.com/yargs/y18n/issues/117)) ([6966fa9](https://www.github.com/yargs/y18n/commit/6966fa91d2881cc6a6c531e836099e01f4da1616))

### [5.0.5](https://www.github.com/yargs/y18n/compare/v5.0.4...v5.0.5) (2020-10-25)


### Bug Fixes

* address prototype pollution issue ([#108](https://www.github.com/yargs/y18n/issues/108)) ([a9ac604](https://www.github.com/yargs/y18n/commit/a9ac604abf756dec9687be3843e2c93bfe581f25))

### [5.0.4](https://www.github.com/yargs/y18n/compare/v5.0.3...v5.0.4) (2020-10-16)


### Bug Fixes

* **exports:** node 13.0 and 13.1 require the dotted object form _with_ a string fallback ([#105](https://www.github.com/yargs/y18n/issues/105)) ([4f85d80](https://www.github.com/yargs/y18n/commit/4f85d80dbaae6d2c7899ae394f7ad97805df4886))

### [5.0.3](https://www.github.com/yargs/y18n/compare/v5.0.2...v5.0.3) (2020-10-16)


### Bug Fixes

* **exports:** node 13.0-13.6 require a string fallback ([#103](https://www.github.com/yargs/y18n/issues/103)) ([e39921e](https://www.github.com/yargs/y18n/commit/e39921e1017f88f5d8ea97ddea854ffe92d68e74))

### [5.0.2](https://www.github.com/yargs/y18n/compare/v5.0.1...v5.0.2) (2020-10-01)


### Bug Fixes

* **deno:** update types for deno ^1.4.0 ([#100](https://www.github.com/yargs/y18n/issues/100)) ([3834d9a](https://www.github.com/yargs/y18n/commit/3834d9ab1332f2937c935ada5e76623290efae81))

### [5.0.1](https://www.github.com/yargs/y18n/compare/v5.0.0...v5.0.1) (2020-09-05)


### Bug Fixes

* main had old index path ([#98](https://www.github.com/yargs/y18n/issues/98)) ([124f7b0](https://www.github.com/yargs/y18n/commit/124f7b047ba9596bdbdf64459988304e77f3de1b))

## [5.0.0](https://www.github.com/yargs/y18n/compare/v4.0.0...v5.0.0) (2020-09-05)


### ⚠ BREAKING CHANGES

* exports maps are now used, which modifies import behavior.
* drops Node 6 and 4. begin following Node.js LTS schedule (#89)

### Features

* add support for ESM and Deno ([#95](https://www.github.com/yargs/y18n/issues/95)) ([4d7ae94](https://www.github.com/yargs/y18n/commit/4d7ae94bcb42e84164e2180366474b1cd321ed94))


### Build System

* drops Node 6 and 4. begin following Node.js LTS schedule ([#89](https://www.github.com/yargs/y18n/issues/89)) ([3cc0c28](https://www.github.com/yargs/y18n/commit/3cc0c287240727b84eaf1927f903612ec80f5e43))

### 4.0.1 (2020-10-25)


### Bug Fixes

* address prototype pollution issue ([#108](https://www.github.com/yargs/y18n/issues/108)) ([a9ac604](https://www.github.com/yargs/y18n/commit/7de58ca0d315990cdb38234e97fc66254cdbcd71))

## [4.0.0](https://github.com/yargs/y18n/compare/v3.2.1...v4.0.0) (2017-10-10)


### Bug Fixes

* allow support for falsy values like 0 in tagged literal ([#45](https://github.com/yargs/y18n/issues/45)) ([c926123](https://github.com/yargs/y18n/commit/c926123))


### Features

* **__:** added tagged template literal support ([#44](https://github.com/yargs/y18n/issues/44)) ([0598daf](https://github.com/yargs/y18n/commit/0598daf))


### BREAKING CHANGES

* **__:** dropping Node 0.10/Node 0.12 support
13
Jira_helper/node_modules/y18n/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,13 @@
Copyright (c) 2015, Contributors

Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
127
Jira_helper/node_modules/y18n/README.md
generated
vendored
Normal file
@@ -0,0 +1,127 @@
# y18n

[![NPM version][npm-image]][npm-url]
[![js-standard-style][standard-image]][standard-url]
[](https://conventionalcommits.org)

The bare-bones internationalization library used by yargs.

Inspired by [i18n](https://www.npmjs.com/package/i18n).

## Examples

_simple string translation:_

```js
const __ = require('y18n')().__;

console.log(__('my awesome string %s', 'foo'));
```

output:

`my awesome string foo`

_using tagged template literals_

```js
const __ = require('y18n')().__;

const str = 'foo';

console.log(__`my awesome string ${str}`);
```

output:

`my awesome string foo`

_pluralization support:_

```js
const __n = require('y18n')().__n;

console.log(__n('one fish %s', '%d fishes %s', 2, 'foo'));
```

output:

`2 fishes foo`

## Deno Example

As of `v5` `y18n` supports [Deno](https://github.com/denoland/deno):

```typescript
import y18n from "https://deno.land/x/y18n/deno.ts";

const __ = y18n({
  locale: 'pirate',
  directory: './test/locales'
}).__

console.info(__`Hi, ${'Ben'} ${'Coe'}!`)
```

You will need to run with `--allow-read` to load alternative locales.

## JSON Language Files

The JSON language files should be stored in a `./locales` folder.
File names correspond to locales, e.g., `en.json`, `pirate.json`.

When strings are observed for the first time they will be
added to the JSON file corresponding to the current locale.

## Methods

### require('y18n')(config)

Create an instance of y18n with the config provided, options include:

* `directory`: the locale directory, default `./locales`.
* `updateFiles`: should newly observed strings be updated in file, default `true`.
* `locale`: what locale should be used.
* `fallbackToLanguage`: should fallback to a language-only file (e.g. `en.json`)
  be allowed if a file matching the locale does not exist (e.g. `en_US.json`),
  default `true`.

### y18n.\_\_(str, arg, arg, arg)

Print a localized string, `%s` will be replaced with `arg`s.

This function can also be used as a tag for a template literal. You can use it
like this: <code>__`hello ${'world'}`</code>. This will be equivalent to
`__('hello %s', 'world')`.

### y18n.\_\_n(singularString, pluralString, count, arg, arg, arg)

Print a localized string with appropriate pluralization. If `%d` is provided
in the string, the `count` will replace this placeholder.

### y18n.setLocale(str)

Set the current locale being used.

### y18n.getLocale()

What locale is currently being used?

### y18n.updateLocale(obj)

Update the current locale with the key value pairs in `obj`.

## Supported Node.js Versions

Libraries in this ecosystem make a best effort to track
[Node.js' release schedule](https://nodejs.org/en/about/releases/). Here's [a
post on why we think this is important](https://medium.com/the-node-js-collection/maintainers-should-consider-following-node-js-release-schedule-ab08ed4de71a).

## License

ISC

[npm-url]: https://npmjs.org/package/y18n
[npm-image]: https://img.shields.io/npm/v/y18n.svg
[standard-image]: https://img.shields.io/badge/code%20style-standard-brightgreen.svg
[standard-url]: https://github.com/feross/standard
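A compact sketch (not part of the vendored README) that exercises the methods listed above; the `./locales` directory and the translations are assumed:

```js
const y18n = require('y18n')({
  locale: 'en',
  directory: './locales',
  updateFiles: false // don't write newly observed strings back to disk
});

console.log(y18n.__('Hello %s', 'world'));         // "Hello world" (no translation yet)
console.log(y18n.__n('one fish', '%d fishes', 3)); // "3 fishes"

y18n.updateLocale({'Hello %s': 'Hi there, %s'});
console.log(y18n.__('Hello %s', 'world'));         // "Hi there, world"
console.log(y18n.getLocale());                     // "en"
```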
203
Jira_helper/node_modules/y18n/build/index.cjs
generated
vendored
Normal file
@@ -0,0 +1,203 @@
'use strict';

var fs = require('fs');
var util = require('util');
var path = require('path');

let shim;
class Y18N {
    constructor(opts) {
        // configurable options.
        opts = opts || {};
        this.directory = opts.directory || './locales';
        this.updateFiles = typeof opts.updateFiles === 'boolean' ? opts.updateFiles : true;
        this.locale = opts.locale || 'en';
        this.fallbackToLanguage = typeof opts.fallbackToLanguage === 'boolean' ? opts.fallbackToLanguage : true;
        // internal stuff.
        this.cache = Object.create(null);
        this.writeQueue = [];
    }
    __(...args) {
        if (typeof arguments[0] !== 'string') {
            return this._taggedLiteral(arguments[0], ...arguments);
        }
        const str = args.shift();
        let cb = function () { }; // start with noop.
        if (typeof args[args.length - 1] === 'function')
            cb = args.pop();
        cb = cb || function () { }; // noop.
        if (!this.cache[this.locale])
            this._readLocaleFile();
        // we've observed a new string, update the language file.
        if (!this.cache[this.locale][str] && this.updateFiles) {
            this.cache[this.locale][str] = str;
            // include the current directory and locale,
            // since these values could change before the
            // write is performed.
            this._enqueueWrite({
                directory: this.directory,
                locale: this.locale,
                cb
            });
        }
        else {
            cb();
        }
        return shim.format.apply(shim.format, [this.cache[this.locale][str] || str].concat(args));
    }
    __n() {
        const args = Array.prototype.slice.call(arguments);
        const singular = args.shift();
        const plural = args.shift();
        const quantity = args.shift();
        let cb = function () { }; // start with noop.
        if (typeof args[args.length - 1] === 'function')
            cb = args.pop();
        if (!this.cache[this.locale])
            this._readLocaleFile();
        let str = quantity === 1 ? singular : plural;
        if (this.cache[this.locale][singular]) {
            const entry = this.cache[this.locale][singular];
            str = entry[quantity === 1 ? 'one' : 'other'];
        }
        // we've observed a new string, update the language file.
        if (!this.cache[this.locale][singular] && this.updateFiles) {
            this.cache[this.locale][singular] = {
                one: singular,
                other: plural
            };
            // include the current directory and locale,
            // since these values could change before the
            // write is performed.
            this._enqueueWrite({
                directory: this.directory,
                locale: this.locale,
                cb
            });
        }
        else {
            cb();
        }
        // if a %d placeholder is provided, add quantity
        // to the arguments expanded by util.format.
        const values = [str];
        if (~str.indexOf('%d'))
            values.push(quantity);
        return shim.format.apply(shim.format, values.concat(args));
    }
    setLocale(locale) {
        this.locale = locale;
    }
    getLocale() {
        return this.locale;
    }
    updateLocale(obj) {
        if (!this.cache[this.locale])
            this._readLocaleFile();
        for (const key in obj) {
            if (Object.prototype.hasOwnProperty.call(obj, key)) {
                this.cache[this.locale][key] = obj[key];
            }
        }
    }
    _taggedLiteral(parts, ...args) {
        let str = '';
        parts.forEach(function (part, i) {
            const arg = args[i + 1];
            str += part;
            if (typeof arg !== 'undefined') {
                str += '%s';
            }
        });
        return this.__.apply(this, [str].concat([].slice.call(args, 1)));
    }
    _enqueueWrite(work) {
        this.writeQueue.push(work);
        if (this.writeQueue.length === 1)
            this._processWriteQueue();
    }
    _processWriteQueue() {
        const _this = this;
        const work = this.writeQueue[0];
        // destructure the enqueued work.
        const directory = work.directory;
        const locale = work.locale;
        const cb = work.cb;
        const languageFile = this._resolveLocaleFile(directory, locale);
        const serializedLocale = JSON.stringify(this.cache[locale], null, 2);
        shim.fs.writeFile(languageFile, serializedLocale, 'utf-8', function (err) {
            _this.writeQueue.shift();
            if (_this.writeQueue.length > 0)
                _this._processWriteQueue();
            cb(err);
        });
    }
    _readLocaleFile() {
        let localeLookup = {};
        const languageFile = this._resolveLocaleFile(this.directory, this.locale);
        try {
            // When using a bundler such as webpack, readFileSync may not be defined:
            if (shim.fs.readFileSync) {
                localeLookup = JSON.parse(shim.fs.readFileSync(languageFile, 'utf-8'));
            }
        }
        catch (err) {
            if (err instanceof SyntaxError) {
                err.message = 'syntax error in ' + languageFile;
            }
            if (err.code === 'ENOENT')
                localeLookup = {};
            else
                throw err;
        }
        this.cache[this.locale] = localeLookup;
    }
    _resolveLocaleFile(directory, locale) {
        let file = shim.resolve(directory, './', locale + '.json');
        if (this.fallbackToLanguage && !this._fileExistsSync(file) && ~locale.lastIndexOf('_')) {
            // attempt fallback to language only
            const languageFile = shim.resolve(directory, './', locale.split('_')[0] + '.json');
            if (this._fileExistsSync(languageFile))
                file = languageFile;
        }
        return file;
    }
    _fileExistsSync(file) {
        return shim.exists(file);
    }
}
function y18n$1(opts, _shim) {
    shim = _shim;
    const y18n = new Y18N(opts);
    return {
        __: y18n.__.bind(y18n),
        __n: y18n.__n.bind(y18n),
        setLocale: y18n.setLocale.bind(y18n),
        getLocale: y18n.getLocale.bind(y18n),
        updateLocale: y18n.updateLocale.bind(y18n),
        locale: y18n.locale
    };
}

var nodePlatformShim = {
    fs: {
        readFileSync: fs.readFileSync,
        writeFile: fs.writeFile
    },
    format: util.format,
    resolve: path.resolve,
    exists: (file) => {
        try {
            return fs.statSync(file).isFile();
        }
        catch (err) {
            return false;
        }
    }
};

const y18n = (opts) => {
    return y18n$1(opts, nodePlatformShim);
};

module.exports = y18n;
6
Jira_helper/node_modules/y18n/build/lib/cjs.js
generated
vendored
Normal file
@@ -0,0 +1,6 @@
import { y18n as _y18n } from './index.js';
import nodePlatformShim from './platform-shims/node.js';
const y18n = (opts) => {
    return _y18n(opts, nodePlatformShim);
};
export default y18n;
174
Jira_helper/node_modules/y18n/build/lib/index.js
generated
vendored
Normal file
@@ -0,0 +1,174 @@
let shim;
class Y18N {
    constructor(opts) {
        // configurable options.
        opts = opts || {};
        this.directory = opts.directory || './locales';
        this.updateFiles = typeof opts.updateFiles === 'boolean' ? opts.updateFiles : true;
        this.locale = opts.locale || 'en';
        this.fallbackToLanguage = typeof opts.fallbackToLanguage === 'boolean' ? opts.fallbackToLanguage : true;
        // internal stuff.
        this.cache = Object.create(null);
        this.writeQueue = [];
    }
    __(...args) {
        if (typeof arguments[0] !== 'string') {
            return this._taggedLiteral(arguments[0], ...arguments);
        }
        const str = args.shift();
        let cb = function () { }; // start with noop.
        if (typeof args[args.length - 1] === 'function')
            cb = args.pop();
        cb = cb || function () { }; // noop.
        if (!this.cache[this.locale])
            this._readLocaleFile();
        // we've observed a new string, update the language file.
        if (!this.cache[this.locale][str] && this.updateFiles) {
            this.cache[this.locale][str] = str;
            // include the current directory and locale,
            // since these values could change before the
            // write is performed.
            this._enqueueWrite({
                directory: this.directory,
                locale: this.locale,
                cb
            });
        }
        else {
            cb();
        }
        return shim.format.apply(shim.format, [this.cache[this.locale][str] || str].concat(args));
    }
    __n() {
        const args = Array.prototype.slice.call(arguments);
        const singular = args.shift();
        const plural = args.shift();
        const quantity = args.shift();
        let cb = function () { }; // start with noop.
        if (typeof args[args.length - 1] === 'function')
            cb = args.pop();
        if (!this.cache[this.locale])
            this._readLocaleFile();
        let str = quantity === 1 ? singular : plural;
        if (this.cache[this.locale][singular]) {
            const entry = this.cache[this.locale][singular];
            str = entry[quantity === 1 ? 'one' : 'other'];
        }
        // we've observed a new string, update the language file.
        if (!this.cache[this.locale][singular] && this.updateFiles) {
            this.cache[this.locale][singular] = {
                one: singular,
                other: plural
            };
            // include the current directory and locale,
            // since these values could change before the
            // write is performed.
            this._enqueueWrite({
                directory: this.directory,
                locale: this.locale,
                cb
            });
        }
        else {
            cb();
        }
        // if a %d placeholder is provided, add quantity
        // to the arguments expanded by util.format.
        const values = [str];
        if (~str.indexOf('%d'))
            values.push(quantity);
        return shim.format.apply(shim.format, values.concat(args));
    }
    setLocale(locale) {
        this.locale = locale;
    }
    getLocale() {
        return this.locale;
    }
    updateLocale(obj) {
        if (!this.cache[this.locale])
            this._readLocaleFile();
        for (const key in obj) {
            if (Object.prototype.hasOwnProperty.call(obj, key)) {
                this.cache[this.locale][key] = obj[key];
            }
        }
    }
    _taggedLiteral(parts, ...args) {
        let str = '';
        parts.forEach(function (part, i) {
            const arg = args[i + 1];
            str += part;
            if (typeof arg !== 'undefined') {
                str += '%s';
            }
        });
        return this.__.apply(this, [str].concat([].slice.call(args, 1)));
    }
    _enqueueWrite(work) {
        this.writeQueue.push(work);
        if (this.writeQueue.length === 1)
            this._processWriteQueue();
    }
    _processWriteQueue() {
        const _this = this;
        const work = this.writeQueue[0];
        // destructure the enqueued work.
        const directory = work.directory;
        const locale = work.locale;
        const cb = work.cb;
        const languageFile = this._resolveLocaleFile(directory, locale);
        const serializedLocale = JSON.stringify(this.cache[locale], null, 2);
        shim.fs.writeFile(languageFile, serializedLocale, 'utf-8', function (err) {
            _this.writeQueue.shift();
            if (_this.writeQueue.length > 0)
                _this._processWriteQueue();
            cb(err);
        });
    }
    _readLocaleFile() {
        let localeLookup = {};
        const languageFile = this._resolveLocaleFile(this.directory, this.locale);
        try {
            // When using a bundler such as webpack, readFileSync may not be defined:
            if (shim.fs.readFileSync) {
                localeLookup = JSON.parse(shim.fs.readFileSync(languageFile, 'utf-8'));
            }
        }
        catch (err) {
            if (err instanceof SyntaxError) {
                err.message = 'syntax error in ' + languageFile;
            }
            if (err.code === 'ENOENT')
                localeLookup = {};
            else
                throw err;
        }
        this.cache[this.locale] = localeLookup;
    }
    _resolveLocaleFile(directory, locale) {
        let file = shim.resolve(directory, './', locale + '.json');
        if (this.fallbackToLanguage && !this._fileExistsSync(file) && ~locale.lastIndexOf('_')) {
            // attempt fallback to language only
            const languageFile = shim.resolve(directory, './', locale.split('_')[0] + '.json');
            if (this._fileExistsSync(languageFile))
                file = languageFile;
        }
        return file;
    }
    _fileExistsSync(file) {
        return shim.exists(file);
    }
}
export function y18n(opts, _shim) {
    shim = _shim;
    const y18n = new Y18N(opts);
    return {
        __: y18n.__.bind(y18n),
        __n: y18n.__n.bind(y18n),
        setLocale: y18n.setLocale.bind(y18n),
        getLocale: y18n.getLocale.bind(y18n),
        updateLocale: y18n.updateLocale.bind(y18n),
        locale: y18n.locale
    };
}
19
Jira_helper/node_modules/y18n/build/lib/platform-shims/node.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
import { readFileSync, statSync, writeFile } from 'fs';
import { format } from 'util';
import { resolve } from 'path';
export default {
    fs: {
        readFileSync,
        writeFile
    },
    format,
    resolve,
    exists: (file) => {
        try {
            return statSync(file).isFile();
        }
        catch (err) {
            return false;
        }
    }
};
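The default Node shim above shows the full surface a shim needs: `fs.readFileSync`, `fs.writeFile`, `format`, `resolve`, and `exists`. A hypothetical sketch (not part of the package) of swapping in an in-memory shim, e.g. for tests, via the two-argument `y18n(opts, shim)` factory exported from `build/lib/index.js`; the import path is relative to the y18n package root and would need adjusting for a real setup:

```js
// Hypothetical test-only shim; nothing here is shipped by y18n.
import { y18n } from './build/lib/index.js';
import { format } from 'util';
import { resolve } from 'path';

// In-memory "file system" keyed by resolved path.
const files = new Map([[resolve('./locales/en.json'), '{"hi": "hello"}']]);

const memoryShim = {
  fs: {
    readFileSync: (file) => {
      if (!files.has(file)) {
        const err = new Error('not found');
        err.code = 'ENOENT'; // y18n treats a missing file as an empty locale
        throw err;
      }
      return files.get(file);
    },
    writeFile: (file, data, encoding, cb) => {
      files.set(file, data);
      cb(null);
    }
  },
  format,
  resolve,
  exists: (file) => files.has(file)
};

const { __ } = y18n({ locale: 'en', updateFiles: false }, memoryShim);
console.log(__('hi')); // "hello"
```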
8
Jira_helper/node_modules/y18n/index.mjs
generated
vendored
Normal file
@@ -0,0 +1,8 @@
import shim from './build/lib/platform-shims/node.js'
import { y18n as _y18n } from './build/lib/index.js'

const y18n = (opts) => {
  return _y18n(opts, shim)
}

export default y18n
Some files were not shown because too many files have changed in this diff.