[generated] sandpack files from: codesandbox-client
1 sandpack-generated/static/browserfs11/node/generic/dropbox_bridge_actual.d.ts vendored Normal file
@@ -0,0 +1 @@
export declare const Dropbox: any;
@@ -0,0 +1,7 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Dropbox = void 0;
var global_1 = require("../core/global");
// If Dropbox isn't on the webpage, then set this to null.
exports.Dropbox = global_1.default.Dropbox ? global_1.default.Dropbox.Dropbox : undefined;
//# sourceMappingURL=dropbox_bridge_actual.js.map
125 sandpack-generated/static/browserfs11/node/generic/emscripten_fs.d.ts vendored Normal file
@@ -0,0 +1,125 @@
|
||||
/**
|
||||
* Defines an Emscripten file system object for use in the Emscripten virtual
|
||||
* filesystem. Allows you to use synchronous BrowserFS file systems from within
|
||||
* Emscripten.
|
||||
*
|
||||
* You can construct a BFSEmscriptenFS, mount it using its mount command,
|
||||
* and then mount it into Emscripten.
|
||||
*
|
||||
* Adapted from Emscripten's NodeFS:
|
||||
* https://raw.github.com/kripken/emscripten/master/src/library_nodefs.js
|
||||
*/
|
||||
import FS from '../core/FS';
|
||||
export interface Stats {
|
||||
dev: number;
|
||||
ino: number;
|
||||
mode: number;
|
||||
nlink: number;
|
||||
uid: number;
|
||||
gid: number;
|
||||
rdev: number;
|
||||
size: number;
|
||||
blksize: number;
|
||||
blocks: number;
|
||||
atime: Date;
|
||||
mtime: Date;
|
||||
ctime: Date;
|
||||
timestamp?: number;
|
||||
}
|
||||
export interface EmscriptenFSNode {
|
||||
name: string;
|
||||
mode: number;
|
||||
parent: EmscriptenFSNode;
|
||||
mount: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
};
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
node_ops: EmscriptenNodeOps;
|
||||
}
|
||||
export interface EmscriptenStream {
|
||||
node: EmscriptenFSNode;
|
||||
nfd: any;
|
||||
flags: string;
|
||||
position: number;
|
||||
}
|
||||
export interface EmscriptenNodeOps {
|
||||
getattr(node: EmscriptenFSNode): Stats;
|
||||
setattr(node: EmscriptenFSNode, attr: Stats): void;
|
||||
lookup(parent: EmscriptenFSNode, name: string): EmscriptenFSNode;
|
||||
mknod(parent: EmscriptenFSNode, name: string, mode: number, dev: any): EmscriptenFSNode;
|
||||
rename(oldNode: EmscriptenFSNode, newDir: EmscriptenFSNode, newName: string): void;
|
||||
unlink(parent: EmscriptenFSNode, name: string): void;
|
||||
rmdir(parent: EmscriptenFSNode, name: string): void;
|
||||
readdir(node: EmscriptenFSNode): string[];
|
||||
symlink(parent: EmscriptenFSNode, newName: string, oldPath: string): void;
|
||||
readlink(node: EmscriptenFSNode): string;
|
||||
}
|
||||
export interface EmscriptenStreamOps {
|
||||
open(stream: EmscriptenStream): void;
|
||||
close(stream: EmscriptenStream): void;
|
||||
read(stream: EmscriptenStream, buffer: Uint8Array, offset: number, length: number, position: number): number;
|
||||
write(stream: EmscriptenStream, buffer: Uint8Array, offset: number, length: number, position: number): number;
|
||||
llseek(stream: EmscriptenStream, offset: number, whence: number): number;
|
||||
}
|
||||
export interface EmscriptenFS {
|
||||
node_ops: EmscriptenNodeOps;
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
mount(mount: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
}): EmscriptenFSNode;
|
||||
createNode(parent: EmscriptenFSNode, name: string, mode: number, dev?: any): EmscriptenFSNode;
|
||||
getMode(path: string): number;
|
||||
realPath(node: EmscriptenFSNode): string;
|
||||
}
|
||||
export default class BFSEmscriptenFS implements EmscriptenFS {
|
||||
flagsToPermissionStringMap: {
|
||||
0: string;
|
||||
1: string;
|
||||
2: string;
|
||||
64: string;
|
||||
65: string;
|
||||
66: string;
|
||||
129: string;
|
||||
193: string;
|
||||
514: string;
|
||||
577: string;
|
||||
578: string;
|
||||
705: string;
|
||||
706: string;
|
||||
1024: string;
|
||||
1025: string;
|
||||
1026: string;
|
||||
1089: string;
|
||||
1090: string;
|
||||
1153: string;
|
||||
1154: string;
|
||||
1217: string;
|
||||
1218: string;
|
||||
4096: string;
|
||||
4098: string;
|
||||
};
|
||||
node_ops: EmscriptenNodeOps;
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
private FS;
|
||||
private PATH;
|
||||
private ERRNO_CODES;
|
||||
private nodefs;
|
||||
constructor(_FS?: any, _PATH?: any, _ERRNO_CODES?: any, nodefs?: FS);
|
||||
mount(m: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
}): EmscriptenFSNode;
|
||||
createNode(parent: EmscriptenFSNode | null, name: string, mode: number, dev?: any): EmscriptenFSNode;
|
||||
getMode(path: string): number;
|
||||
realPath(node: EmscriptenFSNode): string;
|
||||
flagsToPermissionString(flags: string | number): string;
|
||||
getNodeFS(): FS;
|
||||
getFS(): any;
|
||||
getPATH(): any;
|
||||
getERRNO_CODES(): any;
|
||||
}
|
||||
@@ -0,0 +1,365 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
var util_1 = require("../core/util");
|
||||
var BFSEmscriptenStreamOps = /** @class */ (function () {
|
||||
function BFSEmscriptenStreamOps(fs) {
|
||||
this.fs = fs;
|
||||
this.nodefs = fs.getNodeFS();
|
||||
this.FS = fs.getFS();
|
||||
this.PATH = fs.getPATH();
|
||||
this.ERRNO_CODES = fs.getERRNO_CODES();
|
||||
}
|
||||
BFSEmscriptenStreamOps.prototype.open = function (stream) {
|
||||
var path = this.fs.realPath(stream.node);
|
||||
var FS = this.FS;
|
||||
try {
|
||||
if (FS.isFile(stream.node.mode)) {
|
||||
stream.nfd = this.nodefs.openSync(path, this.fs.flagsToPermissionString(stream.flags));
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.close = function (stream) {
|
||||
var FS = this.FS;
|
||||
try {
|
||||
if (FS.isFile(stream.node.mode) && stream.nfd) {
|
||||
this.nodefs.closeSync(stream.nfd);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.read = function (stream, buffer, offset, length, position) {
|
||||
// Avoid copying overhead by reading directly into buffer.
|
||||
try {
|
||||
return this.nodefs.readSync(stream.nfd, (0, util_1.uint8Array2Buffer)(buffer), offset, length, position);
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.write = function (stream, buffer, offset, length, position) {
|
||||
// Avoid copying overhead.
|
||||
try {
|
||||
return this.nodefs.writeSync(stream.nfd, (0, util_1.uint8Array2Buffer)(buffer), offset, length, position);
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.llseek = function (stream, offset, whence) {
|
||||
var position = offset;
|
||||
if (whence === 1) { // SEEK_CUR.
|
||||
position += stream.position;
|
||||
}
|
||||
else if (whence === 2) { // SEEK_END.
|
||||
if (this.FS.isFile(stream.node.mode)) {
|
||||
try {
|
||||
var stat = this.nodefs.fstatSync(stream.nfd);
|
||||
position += stat.size;
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (position < 0) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES.EINVAL);
|
||||
}
|
||||
stream.position = position;
|
||||
return position;
|
||||
};
|
||||
return BFSEmscriptenStreamOps;
|
||||
}());
|
||||
var BFSEmscriptenNodeOps = /** @class */ (function () {
|
||||
function BFSEmscriptenNodeOps(fs) {
|
||||
this.fs = fs;
|
||||
this.nodefs = fs.getNodeFS();
|
||||
this.FS = fs.getFS();
|
||||
this.PATH = fs.getPATH();
|
||||
this.ERRNO_CODES = fs.getERRNO_CODES();
|
||||
}
|
||||
BFSEmscriptenNodeOps.prototype.getattr = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
var stat;
|
||||
try {
|
||||
stat = this.nodefs.lstatSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return {
|
||||
dev: stat.dev,
|
||||
ino: stat.ino,
|
||||
mode: stat.mode,
|
||||
nlink: stat.nlink,
|
||||
uid: stat.uid,
|
||||
gid: stat.gid,
|
||||
rdev: stat.rdev,
|
||||
size: stat.size,
|
||||
atime: stat.atime,
|
||||
mtime: stat.mtime,
|
||||
ctime: stat.ctime,
|
||||
blksize: stat.blksize,
|
||||
blocks: stat.blocks
|
||||
};
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.setattr = function (node, attr) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
if (attr.mode !== undefined) {
|
||||
this.nodefs.chmodSync(path, attr.mode);
|
||||
// update the common node structure mode as well
|
||||
node.mode = attr.mode;
|
||||
}
|
||||
if (attr.timestamp !== undefined) {
|
||||
var date = new Date(attr.timestamp);
|
||||
this.nodefs.utimesSync(path, date, date);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
// Ignore not supported errors. Emscripten does utimesSync when it
|
||||
// writes files, but never really requires the value to be set.
|
||||
if (e.code !== "ENOTSUP") {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
if (attr.size !== undefined) {
|
||||
try {
|
||||
this.nodefs.truncateSync(path, attr.size);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.lookup = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
var mode = this.fs.getMode(path);
|
||||
return this.fs.createNode(parent, name, mode);
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.mknod = function (parent, name, mode, dev) {
|
||||
var node = this.fs.createNode(parent, name, mode, dev);
|
||||
// create the backing node for this in the fs root as well
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
if (this.FS.isDir(node.mode)) {
|
||||
this.nodefs.mkdirSync(path, node.mode);
|
||||
}
|
||||
else {
|
||||
this.nodefs.writeFileSync(path, '', { mode: node.mode });
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return node;
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.rename = function (oldNode, newDir, newName) {
|
||||
var oldPath = this.fs.realPath(oldNode);
|
||||
var newPath = this.PATH.join2(this.fs.realPath(newDir), newName);
|
||||
try {
|
||||
this.nodefs.renameSync(oldPath, newPath);
|
||||
// This logic is missing from the original NodeFS,
|
||||
// causing Emscripten's filesystem to think that the old file still exists.
|
||||
oldNode.name = newName;
|
||||
oldNode.parent = newDir;
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.unlink = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
try {
|
||||
this.nodefs.unlinkSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.rmdir = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
try {
|
||||
this.nodefs.rmdirSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.readdir = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
// Node does not list . and .. in directory listings,
|
||||
// but Emscripten expects it.
|
||||
var contents = this.nodefs.readdirSync(path);
|
||||
contents.push('.', '..');
|
||||
return contents;
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.symlink = function (parent, newName, oldPath) {
|
||||
var newPath = this.PATH.join2(this.fs.realPath(parent), newName);
|
||||
try {
|
||||
this.nodefs.symlinkSync(oldPath, newPath);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.readlink = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
return this.nodefs.readlinkSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
return BFSEmscriptenNodeOps;
|
||||
}());
|
||||
var BFSEmscriptenFS = /** @class */ (function () {
|
||||
function BFSEmscriptenFS(_FS, _PATH, _ERRNO_CODES, nodefs) {
|
||||
if (_FS === void 0) { _FS = self['FS']; }
|
||||
if (_PATH === void 0) { _PATH = self['PATH']; }
|
||||
if (_ERRNO_CODES === void 0) { _ERRNO_CODES = self['ERRNO_CODES']; }
|
||||
if (nodefs === void 0) { nodefs = node_fs_1.default; }
|
||||
// This maps the integer permission modes from http://linux.die.net/man/3/open
|
||||
// to node.js-specific file open permission strings at http://nodejs.org/api/fs.html#fs_fs_open_path_flags_mode_callback
|
||||
this.flagsToPermissionStringMap = {
|
||||
0 /*O_RDONLY*/: 'r',
|
||||
1 /*O_WRONLY*/: 'r+',
|
||||
2 /*O_RDWR*/: 'r+',
|
||||
64 /*O_CREAT*/: 'r',
|
||||
65 /*O_WRONLY|O_CREAT*/: 'r+',
|
||||
66 /*O_RDWR|O_CREAT*/: 'r+',
|
||||
129 /*O_WRONLY|O_EXCL*/: 'rx+',
|
||||
193 /*O_WRONLY|O_CREAT|O_EXCL*/: 'rx+',
|
||||
514 /*O_RDWR|O_TRUNC*/: 'w+',
|
||||
577 /*O_WRONLY|O_CREAT|O_TRUNC*/: 'w',
|
||||
578 /*O_CREAT|O_RDWR|O_TRUNC*/: 'w+',
|
||||
705 /*O_WRONLY|O_CREAT|O_EXCL|O_TRUNC*/: 'wx',
|
||||
706 /*O_RDWR|O_CREAT|O_EXCL|O_TRUNC*/: 'wx+',
|
||||
1024 /*O_APPEND*/: 'a',
|
||||
1025 /*O_WRONLY|O_APPEND*/: 'a',
|
||||
1026 /*O_RDWR|O_APPEND*/: 'a+',
|
||||
1089 /*O_WRONLY|O_CREAT|O_APPEND*/: 'a',
|
||||
1090 /*O_RDWR|O_CREAT|O_APPEND*/: 'a+',
|
||||
1153 /*O_WRONLY|O_EXCL|O_APPEND*/: 'ax',
|
||||
1154 /*O_RDWR|O_EXCL|O_APPEND*/: 'ax+',
|
||||
1217 /*O_WRONLY|O_CREAT|O_EXCL|O_APPEND*/: 'ax',
|
||||
1218 /*O_RDWR|O_CREAT|O_EXCL|O_APPEND*/: 'ax+',
|
||||
4096 /*O_RDONLY|O_DSYNC*/: 'rs',
|
||||
4098 /*O_RDWR|O_DSYNC*/: 'rs+'
|
||||
};
|
||||
this.nodefs = nodefs;
|
||||
this.FS = _FS;
|
||||
this.PATH = _PATH;
|
||||
this.ERRNO_CODES = _ERRNO_CODES;
|
||||
this.node_ops = new BFSEmscriptenNodeOps(this);
|
||||
this.stream_ops = new BFSEmscriptenStreamOps(this);
|
||||
}
|
||||
BFSEmscriptenFS.prototype.mount = function (m) {
|
||||
return this.createNode(null, '/', this.getMode(m.opts.root), 0);
|
||||
};
|
||||
BFSEmscriptenFS.prototype.createNode = function (parent, name, mode, dev) {
|
||||
var FS = this.FS;
|
||||
if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES.EINVAL);
|
||||
}
|
||||
var node = FS.createNode(parent, name, mode);
|
||||
node.node_ops = this.node_ops;
|
||||
node.stream_ops = this.stream_ops;
|
||||
return node;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getMode = function (path) {
|
||||
var stat;
|
||||
try {
|
||||
stat = this.nodefs.lstatSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return stat.mode;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.realPath = function (node) {
|
||||
var parts = [];
|
||||
while (node.parent !== node) {
|
||||
parts.push(node.name);
|
||||
node = node.parent;
|
||||
}
|
||||
parts.push(node.mount.opts.root);
|
||||
parts.reverse();
|
||||
return this.PATH.join.apply(null, parts);
|
||||
};
|
||||
BFSEmscriptenFS.prototype.flagsToPermissionString = function (flags) {
|
||||
var parsedFlags = (typeof flags === "string") ? parseInt(flags, 10) : flags;
|
||||
parsedFlags &= 0x1FFF;
|
||||
if (parsedFlags in this.flagsToPermissionStringMap) {
|
||||
return this.flagsToPermissionStringMap[parsedFlags];
|
||||
}
|
||||
else {
|
||||
return flags;
|
||||
}
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getNodeFS = function () {
|
||||
return this.nodefs;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getFS = function () {
|
||||
return this.FS;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getPATH = function () {
|
||||
return this.PATH;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getERRNO_CODES = function () {
|
||||
return this.ERRNO_CODES;
|
||||
};
|
||||
return BFSEmscriptenFS;
|
||||
}());
|
||||
exports.default = BFSEmscriptenFS;
|
||||
//# sourceMappingURL=emscripten_fs.js.map
|
||||
14 sandpack-generated/static/browserfs11/node/generic/extended_ascii.d.ts vendored Normal file
@@ -0,0 +1,14 @@
/// <reference types="node" />
/**
 * (Nonstandard) String utility function for 8-bit ASCII with the extended
 * character set. Unlike the ASCII above, we do not mask the high bits.
 *
 * Placed into a separate file so it can be used with other Buffer implementations.
 * @see http://en.wikipedia.org/wiki/Extended_ASCII
 */
export default class ExtendedASCII {
    private static extendedChars;
    static str2byte(str: string, buf: Buffer): number;
    static byte2str(buff: Buffer): string;
    static byteLength(str: string): number;
}
@@ -0,0 +1,63 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * (Nonstandard) String utility function for 8-bit ASCII with the extended
 * character set. Unlike the ASCII above, we do not mask the high bits.
 *
 * Placed into a separate file so it can be used with other Buffer implementations.
 * @see http://en.wikipedia.org/wiki/Extended_ASCII
 */
var ExtendedASCII = /** @class */ (function () {
    function ExtendedASCII() {
    }
    ExtendedASCII.str2byte = function (str, buf) {
        var length = str.length > buf.length ? buf.length : str.length;
        for (var i = 0; i < length; i++) {
            var charCode = str.charCodeAt(i);
            if (charCode > 0x7F) {
                // Check if extended ASCII.
                var charIdx = ExtendedASCII.extendedChars.indexOf(str.charAt(i));
                if (charIdx > -1) {
                    charCode = charIdx + 0x80;
                }
                // Otherwise, keep it as-is.
            }
            buf[charCode] = i;
        }
        return length;
    };
    ExtendedASCII.byte2str = function (buff) {
        var chars = new Array(buff.length);
        for (var i = 0; i < buff.length; i++) {
            var charCode = buff[i];
            if (charCode > 0x7F) {
                chars[i] = ExtendedASCII.extendedChars[charCode - 128];
            }
            else {
                chars[i] = String.fromCharCode(charCode);
            }
        }
        return chars.join('');
    };
    ExtendedASCII.byteLength = function (str) { return str.length; };
    ExtendedASCII.extendedChars = ['\u00C7', '\u00FC', '\u00E9', '\u00E2', '\u00E4',
        '\u00E0', '\u00E5', '\u00E7', '\u00EA', '\u00EB', '\u00E8', '\u00EF',
        '\u00EE', '\u00EC', '\u00C4', '\u00C5', '\u00C9', '\u00E6', '\u00C6',
        '\u00F4', '\u00F6', '\u00F2', '\u00FB', '\u00F9', '\u00FF', '\u00D6',
        '\u00DC', '\u00F8', '\u00A3', '\u00D8', '\u00D7', '\u0192', '\u00E1',
        '\u00ED', '\u00F3', '\u00FA', '\u00F1', '\u00D1', '\u00AA', '\u00BA',
        '\u00BF', '\u00AE', '\u00AC', '\u00BD', '\u00BC', '\u00A1', '\u00AB',
        '\u00BB', '_', '_', '_', '\u00A6', '\u00A6', '\u00C1', '\u00C2', '\u00C0',
        '\u00A9', '\u00A6', '\u00A6', '+', '+', '\u00A2', '\u00A5', '+', '+', '-',
        '-', '+', '-', '+', '\u00E3', '\u00C3', '+', '+', '-', '-', '\u00A6', '-',
        '+', '\u00A4', '\u00F0', '\u00D0', '\u00CA', '\u00CB', '\u00C8', 'i',
        '\u00CD', '\u00CE', '\u00CF', '+', '+', '_', '_', '\u00A6', '\u00CC', '_',
        '\u00D3', '\u00DF', '\u00D4', '\u00D2', '\u00F5', '\u00D5', '\u00B5',
        '\u00FE', '\u00DE', '\u00DA', '\u00DB', '\u00D9', '\u00FD', '\u00DD',
        '\u00AF', '\u00B4', '\u00AD', '\u00B1', '_', '\u00BE', '\u00B6', '\u00A7',
        '\u00F7', '\u00B8', '\u00B0', '\u00A8', '\u00B7', '\u00B9', '\u00B3',
        '\u00B2', '_', ' '];
    return ExtendedASCII;
}());
exports.default = ExtendedASCII;
//# sourceMappingURL=extended_ascii.js.map
21 sandpack-generated/static/browserfs11/node/generic/fetch.d.ts vendored Normal file
@@ -0,0 +1,21 @@
/**
 * Contains utility methods using 'fetch'.
 */
/// <reference types="node" />
import { BFSCallback } from '../core/file_system';
export declare const fetchIsAvailable: boolean;
/**
 * Asynchronously download a file as a buffer or a JSON object.
 * Note that the third function signature with a non-specialized type is
 * invalid, but TypeScript requires it when you specialize string arguments to
 * constants.
 * @hidden
 */
export declare function fetchFileAsync(p: string, type: 'buffer', cb: BFSCallback<Buffer>): void;
export declare function fetchFileAsync(p: string, type: 'json', cb: BFSCallback<any>): void;
export declare function fetchFileAsync(p: string, type: string, cb: BFSCallback<any>): void;
/**
 * Asynchronously retrieves the size of the given file in bytes.
 * @hidden
 */
export declare function fetchFileSizeAsync(p: string, cb: BFSCallback<number>): void;
60 sandpack-generated/static/browserfs11/node/generic/fetch.js Normal file
@@ -0,0 +1,60 @@
"use strict";
/**
 * Contains utility methods using 'fetch'.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.fetchFileSizeAsync = exports.fetchFileAsync = exports.fetchIsAvailable = void 0;
var api_error_1 = require("../core/api_error");
exports.fetchIsAvailable = (typeof (fetch) !== "undefined" && fetch !== null);
function fetchFileAsync(p, type, cb) {
    var request;
    try {
        request = fetch(p);
    }
    catch (e) {
        // XXX: fetch will throw a TypeError if the URL has credentials in it
        return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, e.message));
    }
    request
        .then(function (res) {
        if (!res.ok) {
            return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "fetch error: response returned code ".concat(res.status)));
        }
        else {
            switch (type) {
                case 'buffer':
                    res.arrayBuffer()
                        .then(function (buf) { return cb(null, Buffer.from(buf)); })
                        .catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
                    break;
                case 'json':
                    res.json()
                        .then(function (json) { return cb(null, json); })
                        .catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
                    break;
                default:
                    cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type));
            }
        }
    })
        .catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
}
exports.fetchFileAsync = fetchFileAsync;
/**
 * Asynchronously retrieves the size of the given file in bytes.
 * @hidden
 */
function fetchFileSizeAsync(p, cb) {
    fetch(p, { method: 'HEAD' })
        .then(function (res) {
        if (!res.ok) {
            return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "fetch HEAD error: response returned code ".concat(res.status)));
        }
        else {
            return cb(null, parseInt(res.headers.get('Content-Length') || '-1', 10));
        }
    })
        .catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
}
exports.fetchFileSizeAsync = fetchFileSizeAsync;
//# sourceMappingURL=fetch.js.map
150 sandpack-generated/static/browserfs11/node/generic/file_index.d.ts vendored Normal file
@@ -0,0 +1,150 @@
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { UNPKGMeta } from '../backend/UNPKGRequest';
|
||||
import { JSDelivrMeta } from '../backend/JSDelivrRequest';
|
||||
/**
|
||||
* A simple class for storing a filesystem index. Assumes that all paths passed
|
||||
* to it are *absolute* paths.
|
||||
*
|
||||
* Can be used as a partial or a full index, although care must be taken if used
|
||||
* for the former purpose, especially when directories are concerned.
|
||||
*/
|
||||
export declare class FileIndex<T> {
|
||||
/**
|
||||
* Static method for constructing indices from a JSON listing.
|
||||
* @param listing Directory listing generated by tools/XHRIndexer.coffee
|
||||
* @return A new FileIndex object.
|
||||
*/
|
||||
static fromListing<T>(listing: any): FileIndex<T>;
|
||||
static fromUnpkg<T>(listing: UNPKGMeta): FileIndex<T>;
|
||||
static fromJSDelivr<T>(listing: JSDelivrMeta): FileIndex<T>;
|
||||
private _index;
|
||||
/**
|
||||
* Constructs a new FileIndex.
|
||||
*/
|
||||
constructor();
|
||||
/**
|
||||
* Runs the given function over all files in the index.
|
||||
*/
|
||||
fileIterator<T>(cb: (file: T | null, path?: string) => void): void;
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
addPath(path: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* The path is added without special treatment (no joining of adjacent separators, etc).
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
addPathFast(path: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Removes the given path. Can be a file or a directory.
|
||||
* @return The removed item,
|
||||
* or null if it did not exist.
|
||||
*/
|
||||
removePath(path: string): Inode | null;
|
||||
/**
|
||||
* Retrieves the directory listing of the given path.
|
||||
* @return An array of files in the given path, or 'null' if it does not exist.
|
||||
*/
|
||||
ls(path: string): string[] | null;
|
||||
/**
|
||||
* Returns the inode of the given item.
|
||||
* @return Returns null if the item does not exist.
|
||||
*/
|
||||
getInode(path: string): Inode | null;
|
||||
/**
|
||||
* Split into a (directory path, item name) pair
|
||||
*/
|
||||
private _split_path;
|
||||
}
|
||||
/**
|
||||
* Generic interface for file/directory inodes.
|
||||
* Note that Stats objects are what we use for file inodes.
|
||||
*/
|
||||
export interface Inode {
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
}
|
||||
/**
|
||||
* Inode for a file. Stores an arbitrary (filesystem-specific) data payload.
|
||||
*/
|
||||
export declare class FileInode<T> implements Inode {
|
||||
private data;
|
||||
constructor(data: T);
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
getData(): T;
|
||||
setData(data: T): void;
|
||||
}
|
||||
/**
|
||||
* Inode for a directory. Currently only contains the directory listing.
|
||||
*/
|
||||
export declare class DirInode<T> implements Inode {
|
||||
private data;
|
||||
private _ls;
|
||||
/**
|
||||
* Constructs an inode for a directory.
|
||||
*/
|
||||
constructor(data?: T | null);
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
getData(): T | null;
|
||||
/**
|
||||
* Return a Stats object for this inode.
|
||||
* @todo Should probably remove this at some point. This isn't the
|
||||
* responsibility of the FileIndex.
|
||||
*/
|
||||
getStats(): Stats;
|
||||
/**
|
||||
* Returns the directory listing for this directory. Paths in the directory are
|
||||
* relative to the directory's path.
|
||||
* @return The directory listing for this directory.
|
||||
*/
|
||||
getListing(): string[];
|
||||
/**
|
||||
* Returns the inode for the indicated item, or null if it does not exist.
|
||||
* @param p Name of item in this directory.
|
||||
*/
|
||||
getItem(p: string): Inode | null;
|
||||
/**
|
||||
* Add the given item to the directory listing. Note that the given inode is
|
||||
* not copied, and will be mutated by the DirInode if it is a DirInode.
|
||||
* @param p Item name to add to the directory listing.
|
||||
* @param inode The inode for the
|
||||
* item to add to the directory inode.
|
||||
* @return True if it was added, false if it already existed.
|
||||
*/
|
||||
addItem(p: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Removes the given item from the directory listing.
|
||||
* @param p Name of item to remove from the directory listing.
|
||||
* @return Returns the item
|
||||
* removed, or null if the item did not exist.
|
||||
*/
|
||||
remItem(p: string): Inode | null;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare function isFileInode<T>(inode: Inode | null): inode is FileInode<T>;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare function isDirInode<T>(inode: Inode | null): inode is DirInode<T>;
|
||||
364 sandpack-generated/static/browserfs11/node/generic/file_index.js Normal file
@@ -0,0 +1,364 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isDirInode = exports.isFileInode = exports.DirInode = exports.FileInode = exports.FileIndex = void 0;
|
||||
var path = require("path");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
/**
|
||||
* A simple class for storing a filesystem index. Assumes that all paths passed
|
||||
* to it are *absolute* paths.
|
||||
*
|
||||
* Can be used as a partial or a full index, although care must be taken if used
|
||||
* for the former purpose, especially when directories are concerned.
|
||||
*/
|
||||
var FileIndex = /** @class */ (function () {
|
||||
/**
|
||||
* Constructs a new FileIndex.
|
||||
*/
|
||||
function FileIndex() {
|
||||
// _index is a single-level key,value store that maps *directory* paths to
|
||||
// DirInodes. File information is only contained in DirInodes themselves.
|
||||
this._index = {};
|
||||
// Create the root directory.
|
||||
this.addPath('/', new DirInode());
|
||||
}
|
||||
/**
|
||||
* Static method for constructing indices from a JSON listing.
|
||||
* @param listing Directory listing generated by tools/XHRIndexer.coffee
|
||||
* @return A new FileIndex object.
|
||||
*/
|
||||
FileIndex.fromListing = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
// Add a root DirNode.
|
||||
var rootInode = new DirInode();
|
||||
idx._index['/'] = rootInode;
|
||||
var queue = [['', listing, rootInode]];
|
||||
while (queue.length > 0) {
|
||||
var inode = void 0;
|
||||
var next = queue.pop();
|
||||
var pwd = next[0];
|
||||
var tree = next[1];
|
||||
var parent_1 = next[2];
|
||||
for (var node in tree) {
|
||||
if (tree.hasOwnProperty(node)) {
|
||||
var children = tree[node];
|
||||
var name_1 = "".concat(pwd, "/").concat(node);
|
||||
if (children) {
|
||||
idx._index[name_1] = inode = new DirInode();
|
||||
queue.push([name_1, children, inode]);
|
||||
}
|
||||
else {
|
||||
// This inode doesn't have correct size information, noted with -1.
|
||||
inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, -1, 0x16D));
|
||||
}
|
||||
if (parent_1) {
|
||||
parent_1._ls[node] = inode;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return idx;
|
||||
};
|
||||
FileIndex.fromUnpkg = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
function handleDir(dirPath, entry) {
|
||||
var dirInode = new DirInode();
|
||||
entry.files.forEach(function (child) {
|
||||
var inode;
|
||||
if (child.type === 'file') {
|
||||
inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, child.size));
|
||||
// @ts-ignore
|
||||
dirInode._ls[path.basename(child.path)] = inode;
|
||||
}
|
||||
else {
|
||||
idx._index[child.path] = inode = handleDir(child.path, child);
|
||||
}
|
||||
});
|
||||
return dirInode;
|
||||
}
|
||||
idx._index['/'] = handleDir('/', listing);
|
||||
return idx;
|
||||
};
|
||||
FileIndex.fromJSDelivr = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
listing.files.forEach(function (file) {
|
||||
var inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, file.size));
|
||||
idx.addPathFast(file.name, inode);
|
||||
});
|
||||
return idx;
|
||||
};
|
||||
/**
|
||||
* Runs the given function over all files in the index.
|
||||
*/
|
||||
FileIndex.prototype.fileIterator = function (cb) {
|
||||
for (var path_1 in this._index) {
|
||||
if (this._index.hasOwnProperty(path_1)) {
|
||||
var dir = this._index[path_1];
|
||||
var files = dir.getListing();
|
||||
for (var _i = 0, files_1 = files; _i < files_1.length; _i++) {
|
||||
var file = files_1[_i];
|
||||
var item = dir.getItem(file);
|
||||
if (isFileInode(item)) {
|
||||
cb(item.getData(), path_1 + '/' + file);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
FileIndex.prototype.addPath = function (path, inode) {
|
||||
if (!inode) {
|
||||
throw new Error('Inode must be specified');
|
||||
}
|
||||
if (path[0] !== '/') {
|
||||
throw new Error('Path must be absolute, got: ' + path);
|
||||
}
|
||||
// Check if it already exists.
|
||||
if (this._index.hasOwnProperty(path)) {
|
||||
return this._index[path] === inode;
|
||||
}
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Try to add to its parent directory first.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined && path !== '/') {
|
||||
// Create parent.
|
||||
parent = new DirInode();
|
||||
if (!this.addPath(dirpath, parent)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// Add myself to my parent.
|
||||
if (path !== '/') {
|
||||
if (!parent.addItem(itemname, inode)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// If I'm a directory, add myself to the index.
|
||||
if (isDirInode(inode)) {
|
||||
this._index[path] = inode;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* The path is added without special treatment (no joining of adjacent separators, etc).
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
FileIndex.prototype.addPathFast = function (path, inode) {
|
||||
var itemNameMark = path.lastIndexOf('/');
|
||||
var parentPath = itemNameMark === 0 ? "/" : path.substring(0, itemNameMark);
|
||||
var itemName = path.substring(itemNameMark + 1);
|
||||
// Try to add to its parent directory first.
|
||||
var parent = this._index[parentPath];
|
||||
if (parent === undefined) {
|
||||
// Create parent.
|
||||
parent = new DirInode();
|
||||
this.addPathFast(parentPath, parent);
|
||||
}
|
||||
if (!parent.addItem(itemName, inode)) {
|
||||
return false;
|
||||
}
|
||||
// If adding a directory, add to the index as well.
|
||||
if (inode.isDir()) {
|
||||
this._index[path] = inode;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Removes the given path. Can be a file or a directory.
|
||||
* @return The removed item,
|
||||
* or null if it did not exist.
|
||||
*/
|
||||
FileIndex.prototype.removePath = function (path) {
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Try to remove it from its parent directory first.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined) {
|
||||
return null;
|
||||
}
|
||||
// Remove myself from my parent.
|
||||
var inode = parent.remItem(itemname);
|
||||
if (inode === null) {
|
||||
return null;
|
||||
}
|
||||
// If I'm a directory, remove myself from the index, and remove my children.
|
||||
if (isDirInode(inode)) {
|
||||
var children = inode.getListing();
|
||||
for (var _i = 0, children_1 = children; _i < children_1.length; _i++) {
|
||||
var child = children_1[_i];
|
||||
this.removePath(path + '/' + child);
|
||||
}
|
||||
// Remove the directory from the index, unless it's the root.
|
||||
if (path !== '/') {
|
||||
delete this._index[path];
|
||||
}
|
||||
}
|
||||
return inode;
|
||||
};
|
||||
/**
|
||||
* Retrieves the directory listing of the given path.
|
||||
* @return An array of files in the given path, or 'null' if it does not exist.
|
||||
*/
|
||||
FileIndex.prototype.ls = function (path) {
|
||||
var item = this._index[path];
|
||||
if (item === undefined) {
|
||||
return null;
|
||||
}
|
||||
return item.getListing();
|
||||
};
|
||||
/**
|
||||
* Returns the inode of the given item.
|
||||
* @return Returns null if the item does not exist.
|
||||
*/
|
||||
FileIndex.prototype.getInode = function (path) {
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Retrieve from its parent directory.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined) {
|
||||
return null;
|
||||
}
|
||||
// Root case
|
||||
if (dirpath === path) {
|
||||
return parent;
|
||||
}
|
||||
return parent.getItem(itemname);
|
||||
};
|
||||
/**
|
||||
* Split into a (directory path, item name) pair
|
||||
*/
|
||||
FileIndex.prototype._split_path = function (p) {
|
||||
var dirpath = path.dirname(p);
|
||||
var itemname = p.substr(dirpath.length + (dirpath === "/" ? 0 : 1));
|
||||
return [dirpath, itemname];
|
||||
};
|
||||
return FileIndex;
|
||||
}());
|
||||
exports.FileIndex = FileIndex;
|
||||
/**
|
||||
* Inode for a file. Stores an arbitrary (filesystem-specific) data payload.
|
||||
*/
|
||||
var FileInode = /** @class */ (function () {
|
||||
function FileInode(data) {
|
||||
this.data = data;
|
||||
}
|
||||
FileInode.prototype.isFile = function () { return true; };
|
||||
FileInode.prototype.isDir = function () { return false; };
|
||||
FileInode.prototype.getData = function () { return this.data; };
|
||||
FileInode.prototype.setData = function (data) { this.data = data; };
|
||||
return FileInode;
|
||||
}());
|
||||
exports.FileInode = FileInode;
|
||||
/**
|
||||
* Inode for a directory. Currently only contains the directory listing.
|
||||
*/
|
||||
var DirInode = /** @class */ (function () {
|
||||
/**
|
||||
* Constructs an inode for a directory.
|
||||
*/
|
||||
function DirInode(data) {
|
||||
if (data === void 0) { data = null; }
|
||||
this.data = data;
|
||||
this._ls = {};
|
||||
}
|
||||
DirInode.prototype.isFile = function () {
|
||||
return false;
|
||||
};
|
||||
DirInode.prototype.isDir = function () {
|
||||
return true;
|
||||
};
|
||||
DirInode.prototype.getData = function () { return this.data; };
|
||||
/**
|
||||
* Return a Stats object for this inode.
|
||||
* @todo Should probably remove this at some point. This isn't the
|
||||
* responsibility of the FileIndex.
|
||||
*/
|
||||
DirInode.prototype.getStats = function () {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096, 0x16D);
|
||||
};
|
||||
/**
|
||||
* Returns the directory listing for this directory. Paths in the directory are
|
||||
* relative to the directory's path.
|
||||
* @return The directory listing for this directory.
|
||||
*/
|
||||
DirInode.prototype.getListing = function () {
|
||||
return Object.keys(this._ls);
|
||||
};
|
||||
/**
|
||||
* Returns the inode for the indicated item, or null if it does not exist.
|
||||
* @param p Name of item in this directory.
|
||||
*/
|
||||
DirInode.prototype.getItem = function (p) {
|
||||
var item = this._ls[p];
|
||||
return item && this._ls.hasOwnProperty(p) ? item : null;
|
||||
};
|
||||
/**
|
||||
* Add the given item to the directory listing. Note that the given inode is
|
||||
* not copied, and will be mutated by the DirInode if it is a DirInode.
|
||||
* @param p Item name to add to the directory listing.
|
||||
* @param inode The inode for the
|
||||
* item to add to the directory inode.
|
||||
* @return True if it was added, false if it already existed.
|
||||
*/
|
||||
DirInode.prototype.addItem = function (p, inode) {
|
||||
if (p in this._ls) {
|
||||
return false;
|
||||
}
|
||||
this._ls[p] = inode;
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Removes the given item from the directory listing.
|
||||
* @param p Name of item to remove from the directory listing.
|
||||
* @return Returns the item
|
||||
* removed, or null if the item did not exist.
|
||||
*/
|
||||
DirInode.prototype.remItem = function (p) {
|
||||
var item = this._ls[p];
|
||||
if (item === undefined) {
|
||||
return null;
|
||||
}
|
||||
delete this._ls[p];
|
||||
return item;
|
||||
};
|
||||
return DirInode;
|
||||
}());
|
||||
exports.DirInode = DirInode;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isFileInode(inode) {
|
||||
return !!inode && inode.isFile();
|
||||
}
|
||||
exports.isFileInode = isFileInode;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isDirInode(inode) {
|
||||
return !!inode && inode.isDir();
|
||||
}
|
||||
exports.isDirInode = isDirInode;
|
||||
//# sourceMappingURL=file_index.js.map
|
||||
49 sandpack-generated/static/browserfs11/node/generic/inode.d.ts vendored Normal file
@@ -0,0 +1,49 @@
/// <reference types="node" />
import { default as Stats } from '../core/node_fs_stats';
/**
 * Generic inode definition that can easily be serialized.
 */
export default class Inode {
    id: string;
    size: number;
    mode: number;
    atime: number;
    mtime: number;
    ctime: number;
    /**
     * Converts the buffer into an Inode.
     */
    static fromBuffer(buffer: Buffer): Inode;
    constructor(id: string, size: number, mode: number, atime: number, mtime: number, ctime: number);
    /**
     * Handy function that converts the Inode to a Node Stats object.
     */
    toStats(): Stats;
    /**
     * Get the size of this Inode, in bytes.
     */
    getSize(): number;
    /**
     * Writes the inode into the start of the buffer.
     */
    toBuffer(buff?: Buffer): Buffer;
    /**
     * Updates the Inode using information from the stats object. Used by file
     * systems at sync time, e.g.:
     * - Program opens file and gets a File object.
     * - Program mutates file. File object is responsible for maintaining
     *   metadata changes locally -- typically in a Stats object.
     * - Program closes file. File object's metadata changes are synced with the
     *   file system.
     * @return True if any changes have occurred.
     */
    update(stats: Stats): boolean;
    /**
     * @return [Boolean] True if this item is a file.
     */
    isFile(): boolean;
    /**
     * @return [Boolean] True if this item is a directory.
     */
    isDirectory(): boolean;
}
105 sandpack-generated/static/browserfs11/node/generic/inode.js Normal file
@@ -0,0 +1,105 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var node_fs_stats_1 = require("../core/node_fs_stats");
/**
 * Generic inode definition that can easily be serialized.
 */
var Inode = /** @class */ (function () {
    function Inode(id, size, mode, atime, mtime, ctime) {
        this.id = id;
        this.size = size;
        this.mode = mode;
        this.atime = atime;
        this.mtime = mtime;
        this.ctime = ctime;
    }
    /**
     * Converts the buffer into an Inode.
     */
    Inode.fromBuffer = function (buffer) {
        if (buffer === undefined) {
            throw new Error("NO");
        }
        return new Inode(buffer.toString('ascii', 30), buffer.readUInt32LE(0), buffer.readUInt16LE(4), buffer.readDoubleLE(6), buffer.readDoubleLE(14), buffer.readDoubleLE(22));
    };
    /**
     * Handy function that converts the Inode to a Node Stats object.
     */
    Inode.prototype.toStats = function () {
        return new node_fs_stats_1.default((this.mode & 0xF000) === node_fs_stats_1.FileType.DIRECTORY ? node_fs_stats_1.FileType.DIRECTORY : node_fs_stats_1.FileType.FILE, this.size, this.mode, this.atime, this.mtime, this.ctime);
    };
    /**
     * Get the size of this Inode, in bytes.
     */
    Inode.prototype.getSize = function () {
        // ASSUMPTION: ID is ASCII (1 byte per char).
        return 30 + this.id.length;
    };
    /**
     * Writes the inode into the start of the buffer.
     */
    Inode.prototype.toBuffer = function (buff) {
        if (buff === void 0) { buff = Buffer.alloc(this.getSize()); }
        buff.writeUInt32LE(this.size, 0);
        buff.writeUInt16LE(this.mode, 4);
        buff.writeDoubleLE(this.atime, 6);
        buff.writeDoubleLE(this.mtime, 14);
        buff.writeDoubleLE(this.ctime, 22);
        buff.write(this.id, 30, this.id.length, 'ascii');
        return buff;
    };
    /**
     * Updates the Inode using information from the stats object. Used by file
     * systems at sync time, e.g.:
     * - Program opens file and gets a File object.
     * - Program mutates file. File object is responsible for maintaining
     *   metadata changes locally -- typically in a Stats object.
     * - Program closes file. File object's metadata changes are synced with the
     *   file system.
     * @return True if any changes have occurred.
     */
    Inode.prototype.update = function (stats) {
        var hasChanged = false;
        if (this.size !== stats.size) {
            this.size = stats.size;
            hasChanged = true;
        }
        if (this.mode !== stats.mode) {
            this.mode = stats.mode;
            hasChanged = true;
        }
        var atimeMs = stats.atime.getTime();
        if (this.atime !== atimeMs) {
            this.atime = atimeMs;
            hasChanged = true;
        }
        var mtimeMs = stats.mtime.getTime();
        if (this.mtime !== mtimeMs) {
            this.mtime = mtimeMs;
            hasChanged = true;
        }
        var ctimeMs = stats.ctime.getTime();
        if (this.ctime !== ctimeMs) {
            this.ctime = ctimeMs;
            hasChanged = true;
        }
        return hasChanged;
    };
    // XXX: Copied from Stats. Should reconcile these two into something more
    // compact.
    /**
     * @return [Boolean] True if this item is a file.
     */
    Inode.prototype.isFile = function () {
        return (this.mode & 0xF000) === node_fs_stats_1.FileType.FILE;
    };
    /**
     * @return [Boolean] True if this item is a directory.
     */
    Inode.prototype.isDirectory = function () {
        return (this.mode & 0xF000) === node_fs_stats_1.FileType.DIRECTORY;
    };
    return Inode;
}());
exports.default = Inode;
//# sourceMappingURL=inode.js.map
367 sandpack-generated/static/browserfs11/node/generic/key_value_filesystem.d.ts vendored Normal file
@@ -0,0 +1,367 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, SynchronousFileSystem, BFSOneArgCallback, BFSCallback } from '../core/file_system';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { File } from '../core/file';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
/**
|
||||
* Represents a *synchronous* key-value store.
|
||||
*/
|
||||
export interface SyncKeyValueStore {
|
||||
/**
|
||||
* The name of the key-value store.
|
||||
*/
|
||||
name(): string;
|
||||
/**
|
||||
* Empties the key-value store completely.
|
||||
*/
|
||||
clear(): void;
|
||||
/**
|
||||
* Begins a new read-only transaction.
|
||||
*/
|
||||
beginTransaction(type: "readonly"): SyncKeyValueROTransaction;
|
||||
/**
|
||||
* Begins a new read-write transaction.
|
||||
*/
|
||||
beginTransaction(type: "readwrite"): SyncKeyValueRWTransaction;
|
||||
beginTransaction(type: string): SyncKeyValueROTransaction;
|
||||
}
|
||||
/**
|
||||
* A read-only transaction for a synchronous key value store.
|
||||
*/
|
||||
export interface SyncKeyValueROTransaction {
|
||||
/**
|
||||
* Retrieves the data at the given key. Throws an ApiError if an error occurs
|
||||
* or if the key does not exist.
|
||||
* @param key The key to look under for data.
|
||||
* @return The data stored under the key, or undefined if not present.
|
||||
*/
|
||||
get(key: string): Buffer | undefined;
|
||||
}
|
||||
/**
|
||||
* A read-write transaction for a synchronous key value store.
|
||||
*/
|
||||
export interface SyncKeyValueRWTransaction extends SyncKeyValueROTransaction {
|
||||
/**
|
||||
* Adds the data to the store under the given key.
|
||||
* @param key The key to add the data under.
|
||||
* @param data The data to add to the store.
|
||||
* @param overwrite If 'true', overwrite any existing data. If 'false',
|
||||
* avoids storing the data if the key exists.
|
||||
* @return True if storage succeeded, false otherwise.
|
||||
*/
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
/**
|
||||
* Deletes the data at the given key.
|
||||
* @param key The key to delete from the store.
|
||||
*/
|
||||
del(key: string): void;
|
||||
/**
|
||||
* Commits the transaction.
|
||||
*/
|
||||
commit(): void;
|
||||
/**
|
||||
* Aborts and rolls back the transaction.
|
||||
*/
|
||||
abort(): void;
|
||||
}
|
||||
/**
|
||||
* An interface for simple synchronous key-value stores that don't have special
|
||||
* support for transactions and such.
|
||||
*/
|
||||
export interface SimpleSyncStore {
|
||||
get(key: string): Buffer | undefined;
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
del(key: string): void;
|
||||
}
|
||||
/**
|
||||
* A simple RW transaction for simple synchronous key-value stores.
|
||||
*/
|
||||
export declare class SimpleSyncRWTransaction implements SyncKeyValueRWTransaction {
|
||||
private store;
|
||||
/**
|
||||
* Stores data in the keys we modify prior to modifying them.
|
||||
* Allows us to roll back commits.
|
||||
*/
|
||||
private originalData;
|
||||
/**
|
||||
 * List of keys modified in this transaction, if any.
 */
private modifiedKeys;
constructor(store: SimpleSyncStore);
get(key: string): Buffer | undefined;
put(key: string, data: Buffer, overwrite: boolean): boolean;
del(key: string): void;
commit(): void;
abort(): void;
/**
 * Stashes given key value pair into `originalData` if it doesn't already
 * exist. Allows us to stash values the program is requesting anyway to
 * prevent needless `get` requests if the program modifies the data later
 * on during the transaction.
 */
private stashOldValue;
/**
 * Marks the given key as modified, and stashes its value if it has not been
 * stashed already.
 */
private markModified;
}
export interface SyncKeyValueFileSystemOptions {
/**
 * The actual key-value store to read from/write to.
 */
store: SyncKeyValueStore;
}
export declare class SyncKeyValueFile extends PreloadFile<SyncKeyValueFileSystem> implements File {
constructor(_fs: SyncKeyValueFileSystem, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
syncSync(): void;
closeSync(): void;
}
/**
 * A "Synchronous key-value file system". Stores data to/retrieves data from an
 * underlying key-value store.
 *
 * We use a unique ID for each node in the file system. The root node has a
 * fixed ID.
 * @todo Introduce Node ID caching.
 * @todo Check modes.
 */
export declare class SyncKeyValueFileSystem extends SynchronousFileSystem {
static isAvailable(): boolean;
private store;
constructor(options: SyncKeyValueFileSystemOptions);
getName(): string;
isReadOnly(): boolean;
supportsSymlinks(): boolean;
supportsProps(): boolean;
supportsSynch(): boolean;
/**
 * Delete all contents stored in the file system.
 */
empty(): void;
renameSync(oldPath: string, newPath: string): void;
statSync(p: string, isLstat: boolean): Stats;
createFileSync(p: string, flag: FileFlag, mode: number): File;
openFileSync(p: string, flag: FileFlag): File;
unlinkSync(p: string): void;
rmdirSync(p: string): void;
mkdirSync(p: string, mode: number): void;
readdirSync(p: string): string[];
_syncSync(p: string, data: Buffer, stats: Stats): void;
/**
 * Checks if the root directory exists. Creates it if it doesn't.
 */
private makeRootDirectory;
/**
 * Helper function for findINode.
 * @param parent The parent directory of the file we are attempting to find.
 * @param filename The filename of the inode we are attempting to find, minus
 * the parent.
 * @return string The ID of the file's inode in the file system.
 */
private _findINode;
/**
 * Finds the Inode of the given path.
 * @param p The path to look up.
 * @return The Inode of the path p.
 * @todo memoize/cache
 */
private findINode;
/**
 * Given the ID of a node, retrieves the corresponding Inode.
 * @param tx The transaction to use.
 * @param p The corresponding path to the file (used for error messages).
 * @param id The ID to look up.
 */
private getINode;
/**
 * Given the Inode of a directory, retrieves the corresponding directory
 * listing.
 */
private getDirListing;
/**
 * Creates a new node under a random ID. Retries 5 times before giving up in
 * the exceedingly unlikely chance that we try to reuse a random GUID.
 * @return The GUID that the data was stored under.
 */
private addNewNode;
/**
 * Commits a new file (well, a FILE or a DIRECTORY) to the file system with
 * the given mode.
 * Note: This will commit the transaction.
 * @param p The path to the new file.
 * @param type The type of the new file.
 * @param mode The mode to create the new file with.
 * @param data The data to store at the file's data node.
 * @return The Inode for the new file.
 */
private commitNewFile;
/**
 * Remove all traces of the given path from the file system.
 * @param p The path to remove from the file system.
 * @param isDir Does the path belong to a directory, or a file?
 * @todo Update mtime.
 */
private removeEntry;
}
/**
 * Represents an *asynchronous* key-value store.
 */
export interface AsyncKeyValueStore {
/**
 * The name of the key-value store.
 */
name(): string;
/**
 * Empties the key-value store completely.
 */
clear(cb: BFSOneArgCallback): void;
/**
 * Begins a read-write transaction.
 */
beginTransaction(type: 'readwrite'): AsyncKeyValueRWTransaction;
/**
 * Begins a read-only transaction.
 */
beginTransaction(type: 'readonly'): AsyncKeyValueROTransaction;
beginTransaction(type: string): AsyncKeyValueROTransaction;
}
/**
 * Represents an asynchronous read-only transaction.
 */
export interface AsyncKeyValueROTransaction {
/**
 * Retrieves the data at the given key.
 * @param key The key to look under for data.
 */
get(key: string, cb: BFSCallback<Buffer>): void;
}
/**
 * Represents an asynchronous read-write transaction.
 */
export interface AsyncKeyValueRWTransaction extends AsyncKeyValueROTransaction {
/**
 * Adds the data to the store under the given key. Overwrites any existing
 * data.
 * @param key The key to add the data under.
 * @param data The data to add to the store.
 * @param overwrite If 'true', overwrite any existing data. If 'false',
 * avoids writing the data if the key exists.
 * @param cb Triggered with an error and whether or not the value was
 * committed.
 */
put(key: string, data: Buffer, overwrite: boolean, cb: BFSCallback<boolean>): void;
/**
 * Deletes the data at the given key.
 * @param key The key to delete from the store.
 */
del(key: string, cb: BFSOneArgCallback): void;
/**
 * Commits the transaction.
 */
commit(cb: BFSOneArgCallback): void;
/**
 * Aborts and rolls back the transaction.
 */
abort(cb: BFSOneArgCallback): void;
}
export declare class AsyncKeyValueFile extends PreloadFile<AsyncKeyValueFileSystem> implements File {
constructor(_fs: AsyncKeyValueFileSystem, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
sync(cb: BFSOneArgCallback): void;
close(cb: BFSOneArgCallback): void;
}
/**
 * An "Asynchronous key-value file system". Stores data to/retrieves data from
 * an underlying asynchronous key-value store.
 */
export declare class AsyncKeyValueFileSystem extends BaseFileSystem {
static isAvailable(): boolean;
protected store: AsyncKeyValueStore;
private _cache;
constructor(cacheSize: number);
/**
 * Initializes the file system. Typically called by subclasses' async
 * constructors.
 */
init(store: AsyncKeyValueStore, cb: BFSOneArgCallback): void;
getName(): string;
isReadOnly(): boolean;
supportsSymlinks(): boolean;
supportsProps(): boolean;
supportsSynch(): boolean;
/**
 * Delete all contents stored in the file system.
 */
empty(cb: BFSOneArgCallback): void;
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
createFile(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
openFile(p: string, flag: FileFlag, cb: BFSCallback<File>): void;
unlink(p: string, cb: BFSOneArgCallback): void;
rmdir(p: string, cb: BFSOneArgCallback): void;
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
readdir(p: string, cb: BFSCallback<string[]>): void;
_sync(p: string, data: Buffer, stats: Stats, cb: BFSOneArgCallback): void;
/**
 * Checks if the root directory exists. Creates it if it doesn't.
 */
private makeRootDirectory;
/**
 * Helper function for findINode.
 * @param parent The parent directory of the file we are attempting to find.
 * @param filename The filename of the inode we are attempting to find, minus
 * the parent.
 * @param cb Passed an error or the ID of the file's inode in the file system.
 */
private _findINode;
/**
 * Finds the Inode of the given path.
 * @param p The path to look up.
 * @param cb Passed an error or the Inode of the path p.
 * @todo memoize/cache
 */
private findINode;
/**
 * Given the ID of a node, retrieves the corresponding Inode.
 * @param tx The transaction to use.
 * @param p The corresponding path to the file (used for error messages).
 * @param id The ID to look up.
 * @param cb Passed an error or the inode under the given id.
 */
private getINode;
/**
 * Given the Inode of a directory, retrieves the corresponding directory
 * listing.
 */
private getDirListing;
/**
 * Given a path to a directory, retrieves the corresponding INode and
 * directory listing.
 */
private findINodeAndDirListing;
/**
 * Adds a new node under a random ID. Retries 5 times before giving up in
 * the exceedingly unlikely chance that we try to reuse a random GUID.
 * @param cb Passed an error or the GUID that the data was stored under.
 */
private addNewNode;
/**
 * Commits a new file (well, a FILE or a DIRECTORY) to the file system with
 * the given mode.
 * Note: This will commit the transaction.
 * @param p The path to the new file.
 * @param type The type of the new file.
 * @param mode The mode to create the new file with.
 * @param data The data to store at the file's data node.
 * @param cb Passed an error or the Inode for the new file.
 */
private commitNewFile;
/**
 * Remove all traces of the given path from the file system.
 * @param p The path to remove from the file system.
 * @param isDir Does the path belong to a directory, or a file?
 * @todo Update mtime.
 */
private removeEntry;
}
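The interfaces above are the entire surface a storage backend has to provide before it can be handed to AsyncKeyValueFileSystem.init(). The following is a minimal sketch, not part of the vendored output, of a Map-backed store; the names InMemoryAsyncStore and MapTransaction are invented for illustration, and the transaction applies writes eagerly, so commit/abort are no-ops.

// Hypothetical sketch only -- not part of the generated BrowserFS sources.
// Assumes the AsyncKeyValueStore / AsyncKeyValueRWTransaction interfaces declared above.
class MapTransaction implements AsyncKeyValueRWTransaction {
    constructor(private data: Map<string, Buffer>) {}
    public get(key: string, cb: BFSCallback<Buffer>): void {
        cb(null, this.data.get(key));
    }
    public put(key: string, data: Buffer, overwrite: boolean, cb: BFSCallback<boolean>): void {
        if (!overwrite && this.data.has(key)) {
            return cb(null, false);
        }
        this.data.set(key, data);
        cb(null, true);
    }
    public del(key: string, cb: BFSOneArgCallback): void {
        this.data.delete(key);
        cb();
    }
    public commit(cb: BFSOneArgCallback): void { cb(); }
    public abort(cb: BFSOneArgCallback): void { cb(); }
}
class InMemoryAsyncStore implements AsyncKeyValueStore {
    private data = new Map<string, Buffer>();
    public name(): string { return 'InMemoryAsyncStore'; }
    public clear(cb: BFSOneArgCallback): void { this.data.clear(); cb(); }
    public beginTransaction(type: string): AsyncKeyValueRWTransaction {
        return new MapTransaction(this.data);
    }
}

A subclass of AsyncKeyValueFileSystem would then call this.init(new InMemoryAsyncStore(), cb) from its asynchronous constructor, as the init() comment above describes.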
File diff suppressed because it is too large
68
sandpack-generated/static/browserfs11/node/generic/locked_fs.d.ts
vendored
Normal file
68
sandpack-generated/static/browserfs11/node/generic/locked_fs.d.ts
vendored
Normal file
@@ -0,0 +1,68 @@
/// <reference types="node" />
import { FileSystem, BFSOneArgCallback, BFSCallback } from '../core/file_system';
import { FileFlag } from '../core/file_flag';
import { default as Stats } from '../core/node_fs_stats';
import { File } from '../core/file';
/**
 * This class serializes access to an underlying async filesystem.
 * For example, on an OverlayFS instance with an async lower
 * directory operations like rename and rmdir may involve multiple
 * requests involving both the upper and lower filesystems -- they
 * are not executed in a single atomic step. OverlayFS uses this
 * LockedFS to avoid having to reason about the correctness of
 * multiple requests interleaving.
 */
export default class LockedFS<T extends FileSystem> implements FileSystem {
private _fs;
private _mu;
constructor(fs: T);
getName(): string;
getFSUnlocked(): T;
diskSpace(p: string, cb: (total: number, free: number) => any): void;
isReadOnly(): boolean;
supportsLinks(): boolean;
supportsProps(): boolean;
supportsSynch(): boolean;
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
renameSync(oldPath: string, newPath: string): void;
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
statSync(p: string, isLstat: boolean): Stats;
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
openSync(p: string, flag: FileFlag, mode: number): File;
unlink(p: string, cb: BFSOneArgCallback): void;
unlinkSync(p: string): void;
rmdir(p: string, cb: BFSOneArgCallback): void;
rmdirSync(p: string): void;
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
mkdirSync(p: string, mode: number): void;
readdir(p: string, cb: BFSCallback<string[]>): void;
readdirSync(p: string): string[];
exists(p: string, cb: (exists: boolean) => void): void;
existsSync(p: string): boolean;
realpath(p: string, cache: {
[path: string]: string;
}, cb: BFSCallback<string>): void;
realpathSync(p: string, cache: {
[path: string]: string;
}): string;
truncate(p: string, len: number, cb: BFSOneArgCallback): void;
truncateSync(p: string, len: number): void;
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
writeFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
writeFileSync(fname: string, data: any, encoding: string, flag: FileFlag, mode: number): void;
appendFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
appendFileSync(fname: string, data: any, encoding: string, flag: FileFlag, mode: number): void;
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
chmodSync(p: string, isLchmod: boolean, mode: number): void;
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
utimesSync(p: string, atime: Date, mtime: Date): void;
link(srcpath: string, dstpath: string, cb: BFSOneArgCallback): void;
linkSync(srcpath: string, dstpath: string): void;
symlink(srcpath: string, dstpath: string, type: string, cb: BFSOneArgCallback): void;
symlinkSync(srcpath: string, dstpath: string, type: string): void;
readlink(p: string, cb: BFSCallback<string>): void;
readlinkSync(p: string): string;
}
328
sandpack-generated/static/browserfs11/node/generic/locked_fs.js
Normal file
328
sandpack-generated/static/browserfs11/node/generic/locked_fs.js
Normal file
@@ -0,0 +1,328 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var mutex_1 = require("./mutex");
|
||||
/**
|
||||
* This class serializes access to an underlying async filesystem.
|
||||
* For example, on an OverlayFS instance with an async lower
|
||||
* directory operations like rename and rmdir may involve multiple
|
||||
* requests involving both the upper and lower filesystems -- they
|
||||
* are not executed in a single atomic step. OverlayFS uses this
|
||||
* LockedFS to avoid having to reason about the correctness of
|
||||
* multiple requests interleaving.
|
||||
*/
|
||||
var LockedFS = /** @class */ (function () {
|
||||
function LockedFS(fs) {
|
||||
this._fs = fs;
|
||||
this._mu = new mutex_1.default();
|
||||
}
|
||||
LockedFS.prototype.getName = function () {
|
||||
return 'LockedFS<' + this._fs.getName() + '>';
|
||||
};
|
||||
LockedFS.prototype.getFSUnlocked = function () {
|
||||
return this._fs;
|
||||
};
|
||||
LockedFS.prototype.diskSpace = function (p, cb) {
|
||||
// FIXME: should this lock?
|
||||
this._fs.diskSpace(p, cb);
|
||||
};
|
||||
LockedFS.prototype.isReadOnly = function () {
|
||||
return this._fs.isReadOnly();
|
||||
};
|
||||
LockedFS.prototype.supportsLinks = function () {
|
||||
return this._fs.supportsLinks();
|
||||
};
|
||||
LockedFS.prototype.supportsProps = function () {
|
||||
return this._fs.supportsProps();
|
||||
};
|
||||
LockedFS.prototype.supportsSynch = function () {
|
||||
return this._fs.supportsSynch();
|
||||
};
|
||||
LockedFS.prototype.rename = function (oldPath, newPath, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.rename(oldPath, newPath, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.renameSync = function (oldPath, newPath) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.renameSync(oldPath, newPath);
|
||||
};
|
||||
LockedFS.prototype.stat = function (p, isLstat, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.stat(p, isLstat, function (err, stat) {
|
||||
_this._mu.unlock();
|
||||
cb(err, stat);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.statSync = function (p, isLstat) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.statSync(p, isLstat);
|
||||
};
|
||||
LockedFS.prototype.open = function (p, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.open(p, flag, mode, function (err, fd) {
|
||||
_this._mu.unlock();
|
||||
cb(err, fd);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.openSync = function (p, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.openSync(p, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.unlink = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.unlink(p, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.unlinkSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.unlinkSync(p);
|
||||
};
|
||||
LockedFS.prototype.rmdir = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.rmdir(p, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.rmdirSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.rmdirSync(p);
|
||||
};
|
||||
LockedFS.prototype.mkdir = function (p, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.mkdir(p, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.mkdirSync = function (p, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.mkdirSync(p, mode);
|
||||
};
|
||||
LockedFS.prototype.readdir = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readdir(p, function (err, files) {
|
||||
_this._mu.unlock();
|
||||
cb(err, files);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readdirSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readdirSync(p);
|
||||
};
|
||||
LockedFS.prototype.exists = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.exists(p, function (exists) {
|
||||
_this._mu.unlock();
|
||||
cb(exists);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.existsSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.existsSync(p);
|
||||
};
|
||||
LockedFS.prototype.realpath = function (p, cache, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.realpath(p, cache, function (err, resolvedPath) {
|
||||
_this._mu.unlock();
|
||||
cb(err, resolvedPath);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.realpathSync = function (p, cache) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.realpathSync(p, cache);
|
||||
};
|
||||
LockedFS.prototype.truncate = function (p, len, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.truncate(p, len, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.truncateSync = function (p, len) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.truncateSync(p, len);
|
||||
};
|
||||
LockedFS.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readFile(fname, encoding, flag, function (err, data) {
|
||||
_this._mu.unlock();
|
||||
cb(err, data);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readFileSync(fname, encoding, flag);
|
||||
};
|
||||
LockedFS.prototype.writeFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.writeFile(fname, data, encoding, flag, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.writeFileSync = function (fname, data, encoding, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.writeFileSync(fname, data, encoding, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.appendFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.appendFile(fname, data, encoding, flag, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.appendFileSync = function (fname, data, encoding, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.appendFileSync(fname, data, encoding, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.chmod = function (p, isLchmod, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.chmod(p, isLchmod, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.chmodSync = function (p, isLchmod, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.chmodSync(p, isLchmod, mode);
|
||||
};
|
||||
LockedFS.prototype.chown = function (p, isLchown, uid, gid, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.chown(p, isLchown, uid, gid, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.chownSync = function (p, isLchown, uid, gid) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.chownSync(p, isLchown, uid, gid);
|
||||
};
|
||||
LockedFS.prototype.utimes = function (p, atime, mtime, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.utimes(p, atime, mtime, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.utimesSync = function (p, atime, mtime) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.utimesSync(p, atime, mtime);
|
||||
};
|
||||
LockedFS.prototype.link = function (srcpath, dstpath, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.link(srcpath, dstpath, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.linkSync = function (srcpath, dstpath) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.linkSync(srcpath, dstpath);
|
||||
};
|
||||
LockedFS.prototype.symlink = function (srcpath, dstpath, type, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.symlink(srcpath, dstpath, type, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.symlinkSync = function (srcpath, dstpath, type) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.symlinkSync(srcpath, dstpath, type);
|
||||
};
|
||||
LockedFS.prototype.readlink = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readlink(p, function (err, linkString) {
|
||||
_this._mu.unlock();
|
||||
cb(err, linkString);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readlinkSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readlinkSync(p);
|
||||
};
|
||||
return LockedFS;
|
||||
}());
|
||||
exports.default = LockedFS;
|
||||
//# sourceMappingURL=locked_fs.js.map
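Every method of the compiled LockedFS above follows one of two shapes: asynchronous calls acquire the mutex, forward to the wrapped file system, and release the mutex inside the callback, while synchronous calls are rejected outright whenever the mutex is held. A condensed, hypothetical sketch of that pattern, with the mutex and wrapped file system passed in explicitly for illustration:

// Hypothetical condensation of the locking pattern used by each LockedFS method above.
function lockedRename(mu: Mutex, fs: FileSystem, oldPath: string, newPath: string, cb: BFSOneArgCallback): void {
    mu.lock(() => {
        fs.rename(oldPath, newPath, (err) => {
            mu.unlock(); // release before reporting back, exactly as LockedFS does
            cb(err);
        });
    });
}
function lockedRenameSync(mu: Mutex, fs: FileSystem, oldPath: string, newPath: string): void {
    if (mu.isLocked()) {
        throw new Error('invalid sync call'); // sync access is illegal while an async operation holds the lock
    }
    return fs.renameSync(oldPath, newPath);
}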
12
sandpack-generated/static/browserfs11/node/generic/mutex.d.ts
vendored
Normal file
12
sandpack-generated/static/browserfs11/node/generic/mutex.d.ts
vendored
Normal file
@@ -0,0 +1,12 @@
/**
 * Non-recursive mutex
 * @hidden
 */
export default class Mutex {
private _locked;
private _waiters;
lock(cb: Function): void;
unlock(): void;
tryLock(): boolean;
isLocked(): boolean;
}
51
sandpack-generated/static/browserfs11/node/generic/mutex.js
Normal file
51
sandpack-generated/static/browserfs11/node/generic/mutex.js
Normal file
@@ -0,0 +1,51 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var setImmediate_1 = require("../generic/setImmediate");
/**
 * Non-recursive mutex
 * @hidden
 */
var Mutex = /** @class */ (function () {
    function Mutex() {
        this._locked = false;
        this._waiters = [];
    }
    Mutex.prototype.lock = function (cb) {
        if (this._locked) {
            this._waiters.push(cb);
            return;
        }
        this._locked = true;
        cb();
    };
    Mutex.prototype.unlock = function () {
        if (!this._locked) {
            throw new Error('unlock of a non-locked mutex');
        }
        var next = this._waiters.shift();
        // don't unlock - we want to queue up next for the
        // _end_ of the current task execution, but we don't
        // want it to be called inline with whatever the
        // current stack is. This way we still get the nice
        // behavior that an unlock immediately followed by a
        // lock won't cause starvation.
        if (next) {
            (0, setImmediate_1.default)(next);
            return;
        }
        this._locked = false;
    };
    Mutex.prototype.tryLock = function () {
        if (this._locked) {
            return false;
        }
        this._locked = true;
        return true;
    };
    Mutex.prototype.isLocked = function () {
        return this._locked;
    };
    return Mutex;
}());
exports.default = Mutex;
//# sourceMappingURL=mutex.js.map
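The unlock() implementation above hands the lock directly to the next queued waiter and schedules it with setImmediate, so an unlock immediately followed by another lock cannot starve callers that were already waiting. A short, hypothetical usage sketch (doAsyncWork stands in for any async operation and is not part of the vendored output):

// Hypothetical usage of the Mutex above.
declare function doAsyncWork(cb: () => void): void; // stand-in for any async operation
const mu = new Mutex();
mu.lock(() => {
    // the mutex is free, so this callback runs immediately and now holds the lock
    doAsyncWork(() => {
        mu.unlock(); // the next queued waiter, if any, is scheduled via setImmediate
    });
});
mu.lock(() => {
    // queued until the first holder calls unlock()
    mu.unlock();
});
if (mu.tryLock()) {
    // acquired without queueing; the caller is responsible for unlocking
    mu.unlock();
}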
209
sandpack-generated/static/browserfs11/node/generic/preload_file.d.ts
vendored
Normal file
209
sandpack-generated/static/browserfs11/node/generic/preload_file.d.ts
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFile, File } from '../core/file';
|
||||
import { FileSystem, BFSOneArgCallback, BFSCallback, BFSThreeArgCallback } from '../core/file_system';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
/**
|
||||
* An implementation of the File interface that operates on a file that is
|
||||
* completely in-memory. PreloadFiles are backed by a Buffer.
|
||||
*
|
||||
* This is also an abstract class, as it lacks an implementation of 'sync' and
|
||||
* 'close'. Each filesystem that wishes to use this file representation must
|
||||
* extend this class and implement those two methods.
|
||||
* @todo 'close' lever that disables functionality once closed.
|
||||
*/
|
||||
export default class PreloadFile<T extends FileSystem> extends BaseFile {
|
||||
protected _fs: T;
|
||||
private _pos;
|
||||
private _path;
|
||||
private _stat;
|
||||
private _flag;
|
||||
private _buffer;
|
||||
private _dirty;
|
||||
/**
|
||||
* Creates a file with the given path and, optionally, the given contents. Note
|
||||
* that, if contents is specified, it will be mutated by the file!
|
||||
* @param _fs The file system that created the file.
|
||||
* @param _path
|
||||
* @param _mode The mode that the file was opened using.
|
||||
* Dictates permissions and where the file pointer starts.
|
||||
* @param _stat The stats object for the given file.
|
||||
* PreloadFile will mutate this object. Note that this object must contain
|
||||
* the appropriate mode that the file was opened as.
|
||||
* @param contents A buffer containing the entire
|
||||
* contents of the file. PreloadFile will mutate this buffer. If not
|
||||
* specified, we assume it is a new file.
|
||||
*/
|
||||
constructor(_fs: T, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
/**
|
||||
* NONSTANDARD: Get the underlying buffer for this file. !!DO NOT MUTATE!! Will mess up dirty tracking.
|
||||
*/
|
||||
getBuffer(): Buffer;
|
||||
/**
|
||||
* NONSTANDARD: Get underlying stats for this file. !!DO NOT MUTATE!!
|
||||
*/
|
||||
getStats(): Stats;
|
||||
getFlag(): FileFlag;
|
||||
/**
|
||||
* Get the path to this file.
|
||||
* @return [String] The path to the file.
|
||||
*/
|
||||
getPath(): string;
|
||||
/**
|
||||
* Get the current file position.
|
||||
*
|
||||
* We emulate the following bug mentioned in the Node documentation:
|
||||
* > On Linux, positional writes don't work when the file is opened in append
|
||||
* mode. The kernel ignores the position argument and always appends the data
|
||||
* to the end of the file.
|
||||
* @return [Number] The current file position.
|
||||
*/
|
||||
getPos(): number;
|
||||
/**
|
||||
* Advance the current file position by the indicated number of positions.
|
||||
* @param [Number] delta
|
||||
*/
|
||||
advancePos(delta: number): number;
|
||||
/**
|
||||
* Set the file position.
|
||||
* @param [Number] newPos
|
||||
*/
|
||||
setPos(newPos: number): number;
|
||||
/**
|
||||
* **Core**: Asynchronous sync. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous sync.
|
||||
*/
|
||||
syncSync(): void;
|
||||
/**
|
||||
* **Core**: Asynchronous close. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous close.
|
||||
*/
|
||||
closeSync(): void;
|
||||
/**
|
||||
* Asynchronous `stat`.
|
||||
* @param [Function(BrowserFS.ApiError, BrowserFS.node.fs.Stats)] cb
|
||||
*/
|
||||
stat(cb: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* Synchronous `stat`.
|
||||
*/
|
||||
statSync(): Stats;
|
||||
/**
|
||||
* Asynchronous truncate.
|
||||
* @param [Number] len
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
truncate(len: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous truncate.
|
||||
* @param [Number] len
|
||||
*/
|
||||
truncateSync(len: number): void;
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)]
|
||||
* cb The number specifies the number of bytes written into the file.
|
||||
*/
|
||||
write(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.writeSync multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @return [Number]
|
||||
*/
|
||||
writeSync(buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)] cb The
|
||||
* number is the number of bytes read
|
||||
*/
|
||||
read(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @return [Number]
|
||||
*/
|
||||
readSync(buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number|String] mode
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
chmod(mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number] mode
|
||||
*/
|
||||
chmodSync(mode: number): void;
|
||||
protected isDirty(): boolean;
|
||||
/**
|
||||
* Resets the dirty bit. Should only be called after a sync has completed successfully.
|
||||
*/
|
||||
protected resetDirty(): void;
|
||||
}
|
||||
/**
|
||||
* File class for the InMemory and XHR file systems.
|
||||
* Doesn't sync to anything, so it works nicely for memory-only files.
|
||||
*/
|
||||
export declare class NoSyncFile<T extends FileSystem> extends PreloadFile<T> implements File {
|
||||
constructor(_fs: T, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
/**
|
||||
* Asynchronous sync. Doesn't do anything, simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous sync. Doesn't do anything.
|
||||
*/
|
||||
syncSync(): void;
|
||||
/**
|
||||
* Asynchronous close. Doesn't do anything, simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous close. Doesn't do anything.
|
||||
*/
|
||||
closeSync(): void;
|
||||
}
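As the comments above note, PreloadFile keeps the whole file in a Buffer and leaves only sync and close abstract; the compiled base class implements the async sync(cb)/close(cb) by delegating to syncSync()/closeSync(). A hypothetical minimal subclass therefore only needs the two synchronous methods; the name and behaviour below are invented for illustration and are not part of the vendored output:

// Hypothetical sketch of a minimal PreloadFile subclass.
class DiscardOnCloseFile<T extends FileSystem> extends PreloadFile<T> implements File {
    public syncSync(): void {
        if (this.isDirty()) {
            // a real backend would persist getBuffer()/getStats() to its store here
            this.resetDirty();
        }
    }
    public closeSync(): void {
        this.syncSync();
    }
}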
@@ -0,0 +1,408 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.NoSyncFile = void 0;
|
||||
var file_1 = require("../core/file");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
var util_1 = require("../core/util");
|
||||
/**
|
||||
* An implementation of the File interface that operates on a file that is
|
||||
* completely in-memory. PreloadFiles are backed by a Buffer.
|
||||
*
|
||||
* This is also an abstract class, as it lacks an implementation of 'sync' and
|
||||
* 'close'. Each filesystem that wishes to use this file representation must
|
||||
* extend this class and implement those two methods.
|
||||
* @todo 'close' lever that disables functionality once closed.
|
||||
*/
|
||||
var PreloadFile = /** @class */ (function (_super) {
|
||||
__extends(PreloadFile, _super);
|
||||
/**
|
||||
* Creates a file with the given path and, optionally, the given contents. Note
|
||||
* that, if contents is specified, it will be mutated by the file!
|
||||
* @param _fs The file system that created the file.
|
||||
* @param _path
|
||||
* @param _mode The mode that the file was opened using.
|
||||
* Dictates permissions and where the file pointer starts.
|
||||
* @param _stat The stats object for the given file.
|
||||
* PreloadFile will mutate this object. Note that this object must contain
|
||||
* the appropriate mode that the file was opened as.
|
||||
* @param contents A buffer containing the entire
|
||||
* contents of the file. PreloadFile will mutate this buffer. If not
|
||||
* specified, we assume it is a new file.
|
||||
*/
|
||||
function PreloadFile(_fs, _path, _flag, _stat, contents) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._pos = 0;
|
||||
_this._dirty = false;
|
||||
_this._fs = _fs;
|
||||
_this._path = _path;
|
||||
_this._flag = _flag;
|
||||
_this._stat = _stat;
|
||||
_this._buffer = contents ? contents : (0, util_1.emptyBuffer)();
|
||||
// Note: This invariant is *not* maintained once the file starts getting
|
||||
// modified.
|
||||
// Note: Only actually matters if file is readable, as writeable modes may
|
||||
// truncate/append to file.
|
||||
if (_this._stat.size !== _this._buffer.length && _this._flag.isReadable()) {
|
||||
throw new Error("Invalid buffer: Buffer is ".concat(_this._buffer.length, " long, yet Stats object specifies that file is ").concat(_this._stat.size, " long."));
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* NONSTANDARD: Get the underlying buffer for this file. !!DO NOT MUTATE!! Will mess up dirty tracking.
|
||||
*/
|
||||
PreloadFile.prototype.getBuffer = function () {
|
||||
return this._buffer;
|
||||
};
|
||||
/**
|
||||
* NONSTANDARD: Get underlying stats for this file. !!DO NOT MUTATE!!
|
||||
*/
|
||||
PreloadFile.prototype.getStats = function () {
|
||||
return this._stat;
|
||||
};
|
||||
PreloadFile.prototype.getFlag = function () {
|
||||
return this._flag;
|
||||
};
|
||||
/**
|
||||
* Get the path to this file.
|
||||
* @return [String] The path to the file.
|
||||
*/
|
||||
PreloadFile.prototype.getPath = function () {
|
||||
return this._path;
|
||||
};
|
||||
/**
|
||||
* Get the current file position.
|
||||
*
|
||||
* We emulate the following bug mentioned in the Node documentation:
|
||||
* > On Linux, positional writes don't work when the file is opened in append
|
||||
* mode. The kernel ignores the position argument and always appends the data
|
||||
* to the end of the file.
|
||||
* @return [Number] The current file position.
|
||||
*/
|
||||
PreloadFile.prototype.getPos = function () {
|
||||
if (this._flag.isAppendable()) {
|
||||
return this._stat.size;
|
||||
}
|
||||
return this._pos;
|
||||
};
|
||||
/**
|
||||
* Advance the current file position by the indicated number of positions.
|
||||
* @param [Number] delta
|
||||
*/
|
||||
PreloadFile.prototype.advancePos = function (delta) {
|
||||
return this._pos += delta;
|
||||
};
|
||||
/**
|
||||
* Set the file position.
|
||||
* @param [Number] newPos
|
||||
*/
|
||||
PreloadFile.prototype.setPos = function (newPos) {
|
||||
return this._pos = newPos;
|
||||
};
|
||||
/**
|
||||
* **Core**: Asynchronous sync. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.sync = function (cb) {
|
||||
try {
|
||||
this.syncSync();
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* **Core**: Synchronous sync.
|
||||
*/
|
||||
PreloadFile.prototype.syncSync = function () {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* **Core**: Asynchronous close. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.close = function (cb) {
|
||||
try {
|
||||
this.closeSync();
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* **Core**: Synchronous close.
|
||||
*/
|
||||
PreloadFile.prototype.closeSync = function () {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* Asynchronous `stat`.
|
||||
* @param [Function(BrowserFS.ApiError, BrowserFS.node.fs.Stats)] cb
|
||||
*/
|
||||
PreloadFile.prototype.stat = function (cb) {
|
||||
try {
|
||||
cb(null, node_fs_stats_1.default.clone(this._stat));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Synchronous `stat`.
|
||||
*/
|
||||
PreloadFile.prototype.statSync = function () {
|
||||
return node_fs_stats_1.default.clone(this._stat);
|
||||
};
|
||||
/**
|
||||
* Asynchronous truncate.
|
||||
* @param [Number] len
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.truncate = function (len, cb) {
|
||||
try {
|
||||
this.truncateSync(len);
|
||||
if (this._flag.isSynchronous() && !node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
this.sync(cb);
|
||||
}
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
return cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Synchronous truncate.
|
||||
* @param [Number] len
|
||||
*/
|
||||
PreloadFile.prototype.truncateSync = function (len) {
|
||||
this._dirty = true;
|
||||
if (!this._flag.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a writeable mode.');
|
||||
}
|
||||
this._stat.mtimeMs = Date.now();
|
||||
if (len > this._buffer.length) {
|
||||
var buf = Buffer.alloc(len - this._buffer.length, 0);
|
||||
// Write will set @_stat.size for us.
|
||||
this.writeSync(buf, 0, buf.length, this._buffer.length);
|
||||
if (this._flag.isSynchronous() && node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
this.syncSync();
|
||||
}
|
||||
return;
|
||||
}
|
||||
this._stat.size = len;
|
||||
// Truncate buffer to 'len'.
|
||||
var newBuff = Buffer.alloc(len);
|
||||
this._buffer.copy(newBuff, 0, 0, len);
|
||||
this._buffer = newBuff;
|
||||
if (this._flag.isSynchronous() && node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
this.syncSync();
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)]
|
||||
* cb The number specifies the number of bytes written into the file.
|
||||
*/
|
||||
PreloadFile.prototype.write = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.writeSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.writeSync multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @return [Number]
|
||||
*/
|
||||
PreloadFile.prototype.writeSync = function (buffer, offset, length, position) {
|
||||
this._dirty = true;
|
||||
if (position === undefined || position === null) {
|
||||
position = this.getPos();
|
||||
}
|
||||
if (!this._flag.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a writeable mode.');
|
||||
}
|
||||
var endFp = position + length;
|
||||
if (endFp > this._stat.size) {
|
||||
this._stat.size = endFp;
|
||||
if (endFp > this._buffer.length) {
|
||||
// Extend the buffer!
|
||||
var newBuff = Buffer.alloc(endFp);
|
||||
this._buffer.copy(newBuff);
|
||||
this._buffer = newBuff;
|
||||
}
|
||||
}
|
||||
var len = buffer.copy(this._buffer, position, offset, offset + length);
|
||||
this._stat.mtimeMs = Date.now();
|
||||
if (this._flag.isSynchronous()) {
|
||||
this.syncSync();
|
||||
return len;
|
||||
}
|
||||
this.setPos(position + len);
|
||||
return len;
|
||||
};
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)] cb The
|
||||
* number is the number of bytes read
|
||||
*/
|
||||
PreloadFile.prototype.read = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.readSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @return [Number]
|
||||
*/
|
||||
PreloadFile.prototype.readSync = function (buffer, offset, length, position) {
|
||||
if (!this._flag.isReadable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a readable mode.');
|
||||
}
|
||||
if (position === undefined || position === null) {
|
||||
position = this.getPos();
|
||||
}
|
||||
var endRead = position + length;
|
||||
if (endRead > this._stat.size) {
|
||||
length = this._stat.size - position;
|
||||
}
|
||||
var rv = this._buffer.copy(buffer, offset, position, position + length);
|
||||
this._stat.atimeMs = Date.now();
|
||||
this._pos = position + length;
|
||||
return rv;
|
||||
};
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number|String] mode
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.chmod = function (mode, cb) {
|
||||
try {
|
||||
this.chmodSync(mode);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number] mode
|
||||
*/
|
||||
PreloadFile.prototype.chmodSync = function (mode) {
|
||||
if (!this._fs.supportsProps()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
}
|
||||
this._dirty = true;
|
||||
this._stat.chmod(mode);
|
||||
this.syncSync();
|
||||
};
|
||||
PreloadFile.prototype.isDirty = function () {
|
||||
return this._dirty;
|
||||
};
|
||||
/**
|
||||
* Resets the dirty bit. Should only be called after a sync has completed successfully.
|
||||
*/
|
||||
PreloadFile.prototype.resetDirty = function () {
|
||||
this._dirty = false;
|
||||
};
|
||||
return PreloadFile;
|
||||
}(file_1.BaseFile));
|
||||
exports.default = PreloadFile;
|
||||
/**
|
||||
* File class for the InMemory and XHR file systems.
|
||||
* Doesn't sync to anything, so it works nicely for memory-only files.
|
||||
*/
|
||||
var NoSyncFile = /** @class */ (function (_super) {
|
||||
__extends(NoSyncFile, _super);
|
||||
function NoSyncFile(_fs, _path, _flag, _stat, contents) {
|
||||
return _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
}
|
||||
/**
|
||||
* Asynchronous sync. Doesn't do anything, simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
NoSyncFile.prototype.sync = function (cb) {
|
||||
cb();
|
||||
};
|
||||
/**
|
||||
* Synchronous sync. Doesn't do anything.
|
||||
*/
|
||||
NoSyncFile.prototype.syncSync = function () {
|
||||
// NOP.
|
||||
};
|
||||
/**
|
||||
* Asynchronous close. Doesn't do anything, simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
NoSyncFile.prototype.close = function (cb) {
|
||||
cb();
|
||||
};
|
||||
/**
|
||||
* Synchronous close. Doesn't do anything.
|
||||
*/
|
||||
NoSyncFile.prototype.closeSync = function () {
|
||||
// NOP.
|
||||
};
|
||||
return NoSyncFile;
|
||||
}(PreloadFile));
|
||||
exports.NoSyncFile = NoSyncFile;
|
||||
//# sourceMappingURL=preload_file.js.map
|
||||
5
sandpack-generated/static/browserfs11/node/generic/setImmediate.d.ts
vendored
Normal file
5
sandpack-generated/static/browserfs11/node/generic/setImmediate.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
/**
 * @hidden
 */
declare let bfsSetImmediate: (cb: Function, ...args: any[]) => any;
export default bfsSetImmediate;
@@ -0,0 +1,96 @@
|
||||
"use strict";
|
||||
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
|
||||
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
|
||||
if (ar || !(i in from)) {
|
||||
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
|
||||
ar[i] = from[i];
|
||||
}
|
||||
}
|
||||
return to.concat(ar || Array.prototype.slice.call(from));
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var global_1 = require("../core/global");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var bfsSetImmediate;
|
||||
if (typeof (setImmediate) !== "undefined") {
|
||||
bfsSetImmediate = setImmediate;
|
||||
}
|
||||
else {
|
||||
var gScope_1 = global_1.default;
|
||||
var timeouts_1 = [];
|
||||
var messageName_1 = "zero-timeout-message";
|
||||
var canUsePostMessage = function () {
|
||||
if (typeof gScope_1.importScripts !== 'undefined' || !gScope_1.postMessage) {
|
||||
return false;
|
||||
}
|
||||
var postMessageIsAsync = true;
|
||||
var oldOnMessage = gScope_1.onmessage;
|
||||
gScope_1.onmessage = function () {
|
||||
postMessageIsAsync = false;
|
||||
};
|
||||
gScope_1.postMessage('', '*');
|
||||
gScope_1.onmessage = oldOnMessage;
|
||||
return postMessageIsAsync;
|
||||
};
|
||||
if (canUsePostMessage()) {
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
timeouts_1.push({ fn: fn, args: args });
|
||||
gScope_1.postMessage(messageName_1, "*");
|
||||
};
|
||||
var handleMessage = function (event) {
|
||||
if (event.source === self && event.data === messageName_1) {
|
||||
if (event.stopPropagation) {
|
||||
event.stopPropagation();
|
||||
}
|
||||
else {
|
||||
event.cancelBubble = true;
|
||||
}
|
||||
if (timeouts_1.length > 0) {
|
||||
var _a = timeouts_1.shift(), fn = _a.fn, args = _a.args;
|
||||
return fn.apply(void 0, args);
|
||||
}
|
||||
}
|
||||
};
|
||||
if (gScope_1.addEventListener) {
|
||||
gScope_1.addEventListener('message', handleMessage, true);
|
||||
}
|
||||
else {
|
||||
gScope_1.attachEvent('onmessage', handleMessage);
|
||||
}
|
||||
}
|
||||
else if (gScope_1.MessageChannel) {
|
||||
// WebWorker MessageChannel
|
||||
var channel_1 = new gScope_1.MessageChannel();
|
||||
channel_1.port1.onmessage = function (event) {
|
||||
if (timeouts_1.length > 0) {
|
||||
var _a = timeouts_1.shift(), fn = _a.fn, args = _a.args;
|
||||
return fn.apply(void 0, args);
|
||||
}
|
||||
};
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
timeouts_1.push({ fn: fn, args: args });
|
||||
channel_1.port2.postMessage('');
|
||||
};
|
||||
}
|
||||
else {
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
return setTimeout.apply(void 0, __spreadArray([fn, 0], args, false));
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.default = bfsSetImmediate;
|
||||
//# sourceMappingURL=setImmediate.js.map
|
||||
42
sandpack-generated/static/browserfs11/node/generic/xhr.d.ts
vendored
Normal file
42
sandpack-generated/static/browserfs11/node/generic/xhr.d.ts
vendored
Normal file
@@ -0,0 +1,42 @@
/**
 * Contains utility methods for performing a variety of tasks with
 * XmlHttpRequest across browsers.
 */
/// <reference types="node" />
import { ApiError } from '../core/api_error';
import { BFSCallback } from '../core/file_system';
export declare const xhrIsAvailable: boolean;
/**
 * Asynchronously download a file as a buffer or a JSON object.
 * Note that the third function signature with a non-specialized type is
 * invalid, but TypeScript requires it when you specialize string arguments to
 * constants.
 * @hidden
 */
export declare let asyncDownloadFile: {
(p: string, type: 'buffer', cb: BFSCallback<Buffer>): void;
(p: string, type: 'json', cb: BFSCallback<any>): void;
(p: string, type: string, cb: BFSCallback<any>): void;
};
/**
 * Synchronously download a file as a buffer or a JSON object.
 * Note that the third function signature with a non-specialized type is
 * invalid, but TypeScript requires it when you specialize string arguments to
 * constants.
 * @hidden
 */
export declare let syncDownloadFile: {
(p: string, type: 'buffer'): Buffer;
(p: string, type: 'json'): any;
(p: string, type: string): any;
};
/**
 * Synchronously retrieves the size of the given file in bytes.
 * @hidden
 */
export declare function getFileSizeSync(p: string): number;
/**
 * Asynchronously retrieves the size of the given file in bytes.
 * @hidden
 */
export declare function getFileSizeAsync(p: string, cb: (err: ApiError, size?: number) => void): void;
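A brief, hypothetical usage sketch for the helpers declared above; the path /static/example.json is invented for illustration and is not part of the vendored output:

// Hypothetical usage only.
import { asyncDownloadFile, getFileSizeAsync, xhrIsAvailable } from './xhr';
if (xhrIsAvailable) {
    getFileSizeAsync('/static/example.json', (err, size) => {
        if (err) { return console.error(err); }
        console.log('example.json is ' + size + ' bytes');
    });
    // 'json' resolves with the parsed object; 'buffer' resolves with a Buffer instead
    asyncDownloadFile('/static/example.json', 'json', (err, data) => {
        if (err) { return console.error(err); }
        console.log(Object.keys(data));
    });
}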
199
sandpack-generated/static/browserfs11/node/generic/xhr.js
Normal file
199
sandpack-generated/static/browserfs11/node/generic/xhr.js
Normal file
@@ -0,0 +1,199 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Contains utility methods for performing a variety of tasks with
|
||||
* XmlHttpRequest across browsers.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getFileSizeAsync = exports.getFileSizeSync = exports.syncDownloadFile = exports.asyncDownloadFile = exports.xhrIsAvailable = void 0;
|
||||
var util_1 = require("../core/util");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
exports.xhrIsAvailable = (typeof (XMLHttpRequest) !== "undefined" && XMLHttpRequest !== null);
|
||||
function asyncDownloadFileModern(p, type, cb) {
|
||||
var req = new XMLHttpRequest();
|
||||
req.open('GET', p, true);
|
||||
var jsonSupported = true;
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
req.responseType = 'arraybuffer';
|
||||
break;
|
||||
case 'json':
|
||||
// Some browsers don't support the JSON response type.
|
||||
// They either reset responseType, or throw an exception.
|
||||
// @see https://github.com/Modernizr/Modernizr/blob/master/src/testXhrType.js
|
||||
try {
|
||||
req.responseType = 'json';
|
||||
jsonSupported = req.responseType === 'json';
|
||||
}
|
||||
catch (e) {
|
||||
jsonSupported = false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type));
|
||||
}
|
||||
req.onreadystatechange = function (e) {
|
||||
if (req.readyState === 4) {
|
||||
if (req.status === 200) {
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
// XXX: WebKit-based browsers return *null* when XHRing an empty file.
|
||||
return cb(null, req.response ? Buffer.from(req.response) : (0, util_1.emptyBuffer)());
|
||||
case 'json':
|
||||
if (jsonSupported) {
|
||||
return cb(null, req.response);
|
||||
}
|
||||
else {
|
||||
return cb(null, JSON.parse(req.responseText));
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status)));
|
||||
}
|
||||
}
|
||||
};
|
||||
req.send();
|
||||
}
|
||||
function syncDownloadFileModern(p, type) {
    var req = new XMLHttpRequest();
    req.open('GET', p, false);
    // On most platforms, we cannot set the responseType of synchronous downloads.
    // @todo Test for this; IE10 allows this, as do older versions of Chrome/FF.
    var data = null;
    var err = null;
    // Classic hack to download binary data as a string.
    req.overrideMimeType('text/plain; charset=x-user-defined');
    req.onreadystatechange = function (e) {
        if (req.readyState === 4) {
            if (req.status === 200) {
                switch (type) {
                    case 'buffer':
                        // Convert the text into a buffer.
                        var text = req.responseText;
                        data = Buffer.alloc(text.length);
                        // Throw away the upper bits of each character.
                        for (var i = 0; i < text.length; i++) {
                            // This will automatically throw away the upper bit of each
                            // character for us.
                            data[i] = text.charCodeAt(i);
                        }
                        return;
                    case 'json':
                        data = JSON.parse(req.responseText);
                        return;
                }
            }
            else {
                err = new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status));
                return;
            }
        }
    };
    req.send();
    if (err) {
        throw err;
    }
    return data;
}
function syncDownloadFileIE10(p, type) {
    var req = new XMLHttpRequest();
    req.open('GET', p, false);
    switch (type) {
        case 'buffer':
            req.responseType = 'arraybuffer';
            break;
        case 'json':
            // IE10 does not support the JSON type.
            break;
        default:
            throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type);
    }
    var data;
    var err;
    req.onreadystatechange = function (e) {
        if (req.readyState === 4) {
            if (req.status === 200) {
                switch (type) {
                    case 'buffer':
                        data = Buffer.from(req.response);
                        break;
                    case 'json':
                        data = JSON.parse(req.response);
                        break;
                }
            }
            else {
                err = new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status));
            }
        }
    };
    req.send();
    if (err) {
        throw err;
    }
    return data;
}
/**
 * @hidden
 */
function getFileSize(async, p, cb) {
    var req = new XMLHttpRequest();
    req.open('HEAD', p, async);
    req.onreadystatechange = function (e) {
        if (req.readyState === 4) {
            if (req.status === 200) {
                try {
                    return cb(null, parseInt(req.getResponseHeader('Content-Length') || '-1', 10));
                }
                catch (e) {
                    // In the event that the header isn't present or there is an error...
                    return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR HEAD error: Could not read content-length."));
                }
            }
            else {
                return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR HEAD error: response returned code ".concat(req.status)));
            }
        }
    };
    req.send();
}
/**
 * Asynchronously download a file as a buffer or a JSON object.
 * Note that the third function signature with a non-specialized type is
 * invalid, but TypeScript requires it when you specialize string arguments to
 * constants.
 * @hidden
 */
exports.asyncDownloadFile = asyncDownloadFileModern;
/**
 * Synchronously download a file as a buffer or a JSON object.
 * Note that the third function signature with a non-specialized type is
 * invalid, but TypeScript requires it when you specialize string arguments to
 * constants.
 * @hidden
 */
exports.syncDownloadFile = (util_1.isIE && typeof Blob !== 'undefined') ? syncDownloadFileIE10 : syncDownloadFileModern;
/**
 * Synchronously retrieves the size of the given file in bytes.
 * @hidden
 */
function getFileSizeSync(p) {
    var rv = -1;
    getFileSize(false, p, function (err, size) {
        if (err) {
            throw err;
        }
        rv = size;
    });
    return rv;
}
exports.getFileSizeSync = getFileSizeSync;
/**
 * Asynchronously retrieves the size of the given file in bytes.
 * @hidden
 */
function getFileSizeAsync(p, cb) {
    getFileSize(true, p, cb);
}
exports.getFileSizeAsync = getFileSizeAsync;
//# sourceMappingURL=xhr.js.map
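A minimal sketch of how the runtime exports of xhr.js are typically consumed together, not part of the generated file above; the import path and the '/static/data.bin' URL are illustrative assumptions.

// Hypothetical usage sketch -- not part of the generated xhr.js above.
// Import path and '/static/data.bin' are illustrative only.
import { asyncDownloadFile, getFileSizeSync, xhrIsAvailable } from './xhr';

if (xhrIsAvailable) {
  // Asynchronous download specialized on the 'buffer' literal; the callback
  // receives a Node-style (err, data) pair where data is a Buffer.
  asyncDownloadFile('/static/data.bin', 'buffer', (err, data) => {
    if (err) {
      console.error('download failed:', err.message);
      return;
    }
    console.log(`downloaded ${data.length} bytes`);
  });

  // Synchronous size probe via a blocking HEAD request; suitable only where
  // synchronous XHR is acceptable (e.g. workers or legacy synchronous backends).
  console.log('size via HEAD:', getFileSizeSync('/static/data.bin'));
}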