[generated] sandpack files from: codesandbox-client
This commit is contained in:
103
sandpack-generated/static/browserfs11/node/backend/AsyncMirror.d.ts
vendored
Normal file
103
sandpack-generated/static/browserfs11/node/backend/AsyncMirror.d.ts
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
import { FileSystem, SynchronousFileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
/**
|
||||
* Configuration options for the AsyncMirror file system.
|
||||
*/
|
||||
export interface AsyncMirrorOptions {
|
||||
sync: FileSystem;
|
||||
async: FileSystem;
|
||||
}
|
||||
/**
|
||||
* AsyncMirrorFS mirrors a synchronous filesystem into an asynchronous filesystem
|
||||
* by:
|
||||
*
|
||||
* * Performing operations over the in-memory copy, while asynchronously pipelining them
|
||||
* to the backing store.
|
||||
* * During application loading, the contents of the async file system can be reloaded into
|
||||
* the synchronous store, if desired.
|
||||
*
|
||||
* The two stores will be kept in sync. The most common use-case is to pair a synchronous
|
||||
* in-memory filesystem with an asynchronous backing store.
|
||||
*
|
||||
* Example: Mirroring an IndexedDB file system to an in memory file system. Now, you can use
|
||||
* IndexedDB synchronously.
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.configure({
|
||||
* fs: "AsyncMirror",
|
||||
* options: {
|
||||
* sync: { fs: "InMemory" },
|
||||
* async: { fs: "IndexedDB" }
|
||||
* }
|
||||
* }, function(e) {
|
||||
* // BrowserFS is initialized and ready-to-use!
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Or, alternatively:
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.FileSystem.IndexedDB.Create(function(e, idbfs) {
|
||||
* BrowserFS.FileSystem.InMemory.Create(function(e, inMemory) {
|
||||
* BrowserFS.FileSystem.AsyncMirror({
|
||||
* sync: inMemory, async: idbfs
|
||||
* }, function(e, mirrored) {
|
||||
* BrowserFS.initialize(mirrored);
|
||||
* });
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
export default class AsyncMirror extends SynchronousFileSystem implements FileSystem {
|
||||
static readonly Name = "AsyncMirror";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Constructs and initializes an AsyncMirror file system with the given options.
|
||||
*/
|
||||
static Create(opts: AsyncMirrorOptions, cb: BFSCallback<AsyncMirror>): void;
|
||||
static isAvailable(): boolean;
|
||||
/**
|
||||
* Queue of pending asynchronous operations.
|
||||
*/
|
||||
private _queue;
|
||||
private _queueRunning;
|
||||
private _sync;
|
||||
private _async;
|
||||
private _isInitialized;
|
||||
private _initializeCallbacks;
|
||||
/**
|
||||
* **Deprecated; use AsyncMirror.Create() method instead.**
|
||||
*
|
||||
* Mirrors the synchronous file system into the asynchronous file system.
|
||||
*
|
||||
* **IMPORTANT**: You must call `initialize` on the file system before it can be used.
|
||||
* @param sync The synchronous file system to mirror the asynchronous file system to.
|
||||
* @param async The asynchronous file system to mirror.
|
||||
*/
|
||||
constructor(sync: FileSystem, async: FileSystem);
|
||||
getName(): string;
|
||||
_syncSync(fd: PreloadFile<any>): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
statSync(p: string, isLstat: boolean): Stats;
|
||||
openSync(p: string, flag: FileFlag, mode: number): File;
|
||||
unlinkSync(p: string): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdirSync(p: string): string[];
|
||||
existsSync(p: string): boolean;
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
/**
|
||||
* Called once to load up files from async storage into sync storage.
|
||||
*/
|
||||
private _initialize;
|
||||
private enqueueOp;
|
||||
}
|
||||
@@ -0,0 +1,329 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var path = require("path");
|
||||
/**
|
||||
* We define our own file to interpose on syncSync() for mirroring purposes.
|
||||
*/
|
||||
var MirrorFile = /** @class */ (function (_super) {
|
||||
__extends(MirrorFile, _super);
|
||||
function MirrorFile(fs, path, flag, stat, data) {
|
||||
return _super.call(this, fs, path, flag, stat, data) || this;
|
||||
}
|
||||
MirrorFile.prototype.syncSync = function () {
|
||||
if (this.isDirty()) {
|
||||
this._fs._syncSync(this);
|
||||
this.resetDirty();
|
||||
}
|
||||
};
|
||||
MirrorFile.prototype.closeSync = function () {
|
||||
this.syncSync();
|
||||
};
|
||||
return MirrorFile;
|
||||
}(preload_file_1.default));
|
||||
/**
|
||||
* AsyncMirrorFS mirrors a synchronous filesystem into an asynchronous filesystem
|
||||
* by:
|
||||
*
|
||||
* * Performing operations over the in-memory copy, while asynchronously pipelining them
|
||||
* to the backing store.
|
||||
* * During application loading, the contents of the async file system can be reloaded into
|
||||
* the synchronous store, if desired.
|
||||
*
|
||||
* The two stores will be kept in sync. The most common use-case is to pair a synchronous
|
||||
* in-memory filesystem with an asynchronous backing store.
|
||||
*
|
||||
* Example: Mirroring an IndexedDB file system to an in memory file system. Now, you can use
|
||||
* IndexedDB synchronously.
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.configure({
|
||||
* fs: "AsyncMirror",
|
||||
* options: {
|
||||
* sync: { fs: "InMemory" },
|
||||
* async: { fs: "IndexedDB" }
|
||||
* }
|
||||
* }, function(e) {
|
||||
* // BrowserFS is initialized and ready-to-use!
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Or, alternatively:
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.FileSystem.IndexedDB.Create(function(e, idbfs) {
|
||||
* BrowserFS.FileSystem.InMemory.Create(function(e, inMemory) {
|
||||
* BrowserFS.FileSystem.AsyncMirror({
|
||||
* sync: inMemory, async: idbfs
|
||||
* }, function(e, mirrored) {
|
||||
* BrowserFS.initialize(mirrored);
|
||||
* });
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
var AsyncMirror = /** @class */ (function (_super) {
|
||||
__extends(AsyncMirror, _super);
|
||||
/**
|
||||
* **Deprecated; use AsyncMirror.Create() method instead.**
|
||||
*
|
||||
* Mirrors the synchronous file system into the asynchronous file system.
|
||||
*
|
||||
* **IMPORTANT**: You must call `initialize` on the file system before it can be used.
|
||||
* @param sync The synchronous file system to mirror the asynchronous file system to.
|
||||
* @param async The asynchronous file system to mirror.
|
||||
*/
|
||||
function AsyncMirror(sync, async) {
|
||||
var _this = _super.call(this) || this;
|
||||
/**
|
||||
* Queue of pending asynchronous operations.
|
||||
*/
|
||||
_this._queue = [];
|
||||
_this._queueRunning = false;
|
||||
_this._isInitialized = false;
|
||||
_this._initializeCallbacks = [];
|
||||
_this._sync = sync;
|
||||
_this._async = async;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Constructs and initializes an AsyncMirror file system with the given options.
|
||||
*/
|
||||
AsyncMirror.Create = function (opts, cb) {
|
||||
try {
|
||||
var fs_1 = new AsyncMirror(opts.sync, opts.async);
|
||||
fs_1._initialize(function (e) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
cb(null, fs_1);
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
AsyncMirror.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
AsyncMirror.prototype.getName = function () {
|
||||
return AsyncMirror.Name;
|
||||
};
|
||||
AsyncMirror.prototype._syncSync = function (fd) {
|
||||
this._sync.writeFileSync(fd.getPath(), fd.getBuffer(), null, file_flag_1.FileFlag.getFileFlag('w'), fd.getStats().mode);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'writeFile',
|
||||
arguments: [fd.getPath(), fd.getBuffer(), null, fd.getFlag(), fd.getStats().mode]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.isReadOnly = function () { return false; };
|
||||
AsyncMirror.prototype.supportsSynch = function () { return true; };
|
||||
AsyncMirror.prototype.supportsLinks = function () { return false; };
|
||||
AsyncMirror.prototype.supportsProps = function () { return this._sync.supportsProps() && this._async.supportsProps(); };
|
||||
AsyncMirror.prototype.renameSync = function (oldPath, newPath) {
|
||||
this._sync.renameSync(oldPath, newPath);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'rename',
|
||||
arguments: [oldPath, newPath]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.statSync = function (p, isLstat) {
|
||||
return this._sync.statSync(p, isLstat);
|
||||
};
|
||||
AsyncMirror.prototype.openSync = function (p, flag, mode) {
|
||||
// Sanity check: Is this open/close permitted?
|
||||
var fd = this._sync.openSync(p, flag, mode);
|
||||
fd.closeSync();
|
||||
return new MirrorFile(this, p, flag, this._sync.statSync(p, false), this._sync.readFileSync(p, null, file_flag_1.FileFlag.getFileFlag('r')));
|
||||
};
|
||||
AsyncMirror.prototype.unlinkSync = function (p) {
|
||||
this._sync.unlinkSync(p);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'unlink',
|
||||
arguments: [p]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.rmdirSync = function (p) {
|
||||
this._sync.rmdirSync(p);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'rmdir',
|
||||
arguments: [p]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.mkdirSync = function (p, mode) {
|
||||
this._sync.mkdirSync(p, mode);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'mkdir',
|
||||
arguments: [p, mode]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.readdirSync = function (p) {
|
||||
return this._sync.readdirSync(p);
|
||||
};
|
||||
AsyncMirror.prototype.existsSync = function (p) {
|
||||
return this._sync.existsSync(p);
|
||||
};
|
||||
AsyncMirror.prototype.chmodSync = function (p, isLchmod, mode) {
|
||||
this._sync.chmodSync(p, isLchmod, mode);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'chmod',
|
||||
arguments: [p, isLchmod, mode]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.chownSync = function (p, isLchown, uid, gid) {
|
||||
this._sync.chownSync(p, isLchown, uid, gid);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'chown',
|
||||
arguments: [p, isLchown, uid, gid]
|
||||
});
|
||||
};
|
||||
AsyncMirror.prototype.utimesSync = function (p, atime, mtime) {
|
||||
this._sync.utimesSync(p, atime, mtime);
|
||||
this.enqueueOp({
|
||||
apiMethod: 'utimes',
|
||||
arguments: [p, atime, mtime]
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Called once to load up files from async storage into sync storage.
|
||||
*/
|
||||
AsyncMirror.prototype._initialize = function (userCb) {
|
||||
var _this = this;
|
||||
var callbacks = this._initializeCallbacks;
|
||||
var end = function (e) {
|
||||
_this._isInitialized = !e;
|
||||
_this._initializeCallbacks = [];
|
||||
callbacks.forEach(function (cb) { return cb(e); });
|
||||
};
|
||||
if (!this._isInitialized) {
|
||||
// First call triggers initialization, the rest wait.
|
||||
if (callbacks.push(userCb) === 1) {
|
||||
var copyDirectory_1 = function (p, mode, cb) {
|
||||
if (p !== '/') {
|
||||
_this._sync.mkdirSync(p, mode);
|
||||
}
|
||||
_this._async.readdir(p, function (err, files) {
|
||||
var i = 0;
|
||||
// NOTE: This function must not be in a lexically nested statement,
|
||||
// such as an if or while statement. Safari refuses to run the
|
||||
// script since it is undefined behavior.
|
||||
function copyNextFile(err) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else if (i < files.length) {
|
||||
copyItem_1(path.join(p, files[i]), copyNextFile);
|
||||
i++;
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
}
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else {
|
||||
copyNextFile();
|
||||
}
|
||||
});
|
||||
}, copyFile_1 = function (p, mode, cb) {
|
||||
_this._async.readFile(p, null, file_flag_1.FileFlag.getFileFlag('r'), function (err, data) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else {
|
||||
try {
|
||||
_this._sync.writeFileSync(p, data, null, file_flag_1.FileFlag.getFileFlag('w'), mode);
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
}
|
||||
});
|
||||
}, copyItem_1 = function (p, cb) {
|
||||
_this._async.stat(p, false, function (err, stats) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else if (stats.isDirectory()) {
|
||||
copyDirectory_1(p, stats.mode, cb);
|
||||
}
|
||||
else {
|
||||
copyFile_1(p, stats.mode, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
copyDirectory_1('/', 0, end);
|
||||
}
|
||||
}
|
||||
else {
|
||||
userCb();
|
||||
}
|
||||
};
|
||||
AsyncMirror.prototype.enqueueOp = function (op) {
|
||||
var _this = this;
|
||||
this._queue.push(op);
|
||||
if (!this._queueRunning) {
|
||||
this._queueRunning = true;
|
||||
var doNextOp_1 = function (err) {
|
||||
if (err) {
|
||||
throw new Error("WARNING: File system has desynchronized. Received following error: ".concat(err, "\n$"));
|
||||
}
|
||||
if (_this._queue.length > 0) {
|
||||
var op_1 = _this._queue.shift(), args = op_1.arguments;
|
||||
args.push(doNextOp_1);
|
||||
_this._async[op_1.apiMethod].apply(_this._async, args);
|
||||
}
|
||||
else {
|
||||
_this._queueRunning = false;
|
||||
}
|
||||
};
|
||||
doNextOp_1();
|
||||
}
|
||||
};
|
||||
AsyncMirror.Name = "AsyncMirror";
|
||||
AsyncMirror.Options = {
|
||||
sync: {
|
||||
type: "object",
|
||||
description: "The synchronous file system to mirror the asynchronous file system to.",
|
||||
validator: function (v, cb) {
|
||||
if (v && typeof (v['supportsSynch']) === "function" && v.supportsSynch()) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "'sync' option must be a file system that supports synchronous operations"));
|
||||
}
|
||||
}
|
||||
},
|
||||
async: {
|
||||
type: "object",
|
||||
description: "The asynchronous file system to mirror."
|
||||
}
|
||||
};
|
||||
return AsyncMirror;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = AsyncMirror;
|
||||
//# sourceMappingURL=AsyncMirror.js.map
|
||||
102
sandpack-generated/static/browserfs11/node/backend/BundledHTTPRequest.d.ts
vendored
Normal file
102
sandpack-generated/static/browserfs11/node/backend/BundledHTTPRequest.d.ts
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
/**
|
||||
* Configuration options for a HTTPRequest file system.
|
||||
*/
|
||||
export interface HTTPRequestOptions {
|
||||
index?: string | object;
|
||||
bundle?: string | object;
|
||||
baseUrl?: string;
|
||||
preferXHR?: boolean;
|
||||
logReads?: boolean;
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
export default class BundledHTTPRequest extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "BundledHTTPRequest";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
static Create(opts: HTTPRequestOptions, cb: BFSCallback<BundledHTTPRequest>): void;
|
||||
static isAvailable(): boolean;
|
||||
readonly prefixUrl: string;
|
||||
private _logReads;
|
||||
private _index;
|
||||
private _requestFileAsyncInternal;
|
||||
private _requestFileSizeAsyncInternal;
|
||||
private _requestFileSyncInternal;
|
||||
private _requestFileSizeSyncInternal;
|
||||
private constructor();
|
||||
empty(): void;
|
||||
getName(): string;
|
||||
diskSpace(path: string, cb: (total: number, free: number) => void): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
private logRead;
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
preloadFile(path: string, buffer: Buffer): void;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(path: string, isLstat: boolean): Stats;
|
||||
open(path: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(path: string, flags: FileFlag, mode: number): File;
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
private _getHTTPPath;
|
||||
/**
|
||||
* Asynchronously download the given file.
|
||||
*/
|
||||
private _requestFileAsync;
|
||||
/**
|
||||
* Synchronously download the given file.
|
||||
*/
|
||||
private _requestFileSync;
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
private _requestFileSizeAsync;
|
||||
private _requestFileSizeSync;
|
||||
}
|
||||
@@ -0,0 +1,483 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var util_1 = require("../core/util");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var xhr_1 = require("../generic/xhr");
|
||||
var fetch_1 = require("../generic/fetch");
|
||||
var file_index_1 = require("../generic/file_index");
|
||||
/**
|
||||
* Try to convert the given buffer into a string, and pass it to the callback.
|
||||
* Optimization that removes the needed try/catch into a helper function, as
|
||||
* this is an uncommon case.
|
||||
* @hidden
|
||||
*/
|
||||
function tryToString(buff, encoding, cb) {
|
||||
try {
|
||||
cb(null, buff.toString(encoding));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
}
|
||||
function syncNotAvailableError() {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP, "Synchronous HTTP download methods are not available in this environment.");
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
var BundledHTTPRequest = /** @class */ (function (_super) {
|
||||
__extends(BundledHTTPRequest, _super);
|
||||
function BundledHTTPRequest(index, bundle, prefixUrl, preferXHR, logReads) {
|
||||
if (bundle === void 0) { bundle = {}; }
|
||||
if (prefixUrl === void 0) { prefixUrl = ''; }
|
||||
if (preferXHR === void 0) { preferXHR = false; }
|
||||
if (logReads === void 0) { logReads = false; }
|
||||
var _this = _super.call(this) || this;
|
||||
// prefix_url must end in a directory separator.
|
||||
if (prefixUrl.length > 0 && prefixUrl.charAt(prefixUrl.length - 1) !== '/') {
|
||||
prefixUrl = prefixUrl + '/';
|
||||
}
|
||||
_this.prefixUrl = prefixUrl;
|
||||
_this._logReads = logReads;
|
||||
_this._index = file_index_1.FileIndex.fromListing(index);
|
||||
_this._index.fileIterator(function (file, path) {
|
||||
var bundleInfo = bundle[path];
|
||||
if (bundleInfo !== undefined) {
|
||||
if (typeof bundleInfo === 'number') {
|
||||
file.size = bundleInfo;
|
||||
}
|
||||
else if (!file.fileData) {
|
||||
var buffer = new Buffer(bundleInfo);
|
||||
file.size = buffer.length;
|
||||
file.fileData = buffer;
|
||||
}
|
||||
}
|
||||
});
|
||||
if (fetch_1.fetchIsAvailable && (!preferXHR || !xhr_1.xhrIsAvailable)) {
|
||||
_this._requestFileAsyncInternal = fetch_1.fetchFileAsync;
|
||||
_this._requestFileSizeAsyncInternal = fetch_1.fetchFileSizeAsync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileAsyncInternal = xhr_1.asyncDownloadFile;
|
||||
_this._requestFileSizeAsyncInternal = xhr_1.getFileSizeAsync;
|
||||
}
|
||||
if (xhr_1.xhrIsAvailable) {
|
||||
_this._requestFileSyncInternal = xhr_1.syncDownloadFile;
|
||||
_this._requestFileSizeSyncInternal = xhr_1.getFileSizeSync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileSyncInternal = syncNotAvailableError;
|
||||
_this._requestFileSizeSyncInternal = syncNotAvailableError;
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
BundledHTTPRequest.Create = function (opts, cb) {
|
||||
if (opts.index === undefined) {
|
||||
opts.index = "index.json";
|
||||
}
|
||||
if (typeof (opts.index) === "string") {
|
||||
(0, xhr_1.asyncDownloadFile)(opts.index, "json", function (e, data) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
if (typeof opts.bundle === 'string') {
|
||||
(0, xhr_1.asyncDownloadFile)(opts.bundle, "json", function (e, bundleData) {
|
||||
if (e) {
|
||||
console.error("Couldn't preload bundle", e);
|
||||
}
|
||||
cb(null, new BundledHTTPRequest(data, bundleData || {}, opts.baseUrl, opts.preferXHR, opts.logReads));
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, new BundledHTTPRequest(data, (opts.bundle || {}), opts.baseUrl, opts.preferXHR, opts.logReads));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
var index_1 = opts.index;
|
||||
if (typeof opts.bundle === 'string') {
|
||||
(0, xhr_1.asyncDownloadFile)(opts.bundle, "json", function (e, bundleData) {
|
||||
if (e) {
|
||||
console.error("Couldn't preload bundle", e);
|
||||
}
|
||||
cb(null, new BundledHTTPRequest(index_1, bundleData || {}, opts.baseUrl, opts.preferXHR, opts.logReads));
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, new BundledHTTPRequest(index_1, (opts.bundle || {}), opts.baseUrl, opts.preferXHR, opts.logReads));
|
||||
}
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.isAvailable = function () {
|
||||
return xhr_1.xhrIsAvailable || fetch_1.fetchIsAvailable;
|
||||
};
|
||||
BundledHTTPRequest.prototype.empty = function () {
|
||||
this._index.fileIterator(function (file) {
|
||||
file.fileData = null;
|
||||
});
|
||||
};
|
||||
BundledHTTPRequest.prototype.getName = function () {
|
||||
return BundledHTTPRequest.Name;
|
||||
};
|
||||
BundledHTTPRequest.prototype.diskSpace = function (path, cb) {
|
||||
// Read-only file system. We could calculate the total space, but that's not
|
||||
// important right now.
|
||||
cb(0, 0);
|
||||
};
|
||||
BundledHTTPRequest.prototype.isReadOnly = function () {
|
||||
return true;
|
||||
};
|
||||
BundledHTTPRequest.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
BundledHTTPRequest.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
BundledHTTPRequest.prototype.supportsSynch = function () {
|
||||
// Synchronous operations are only available via the XHR interface for now.
|
||||
return xhr_1.xhrIsAvailable;
|
||||
};
|
||||
BundledHTTPRequest.prototype.logRead = function (path, content) {
|
||||
var ctx = (self || global);
|
||||
ctx.fileReads = ctx.fileReads || {};
|
||||
if (!ctx.fileReads[path] || typeof ctx.fileReads[path] === 'number') {
|
||||
ctx.fileReads[path] = content;
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
BundledHTTPRequest.prototype.preloadFile = function (path, buffer) {
|
||||
var inode = this._index.getInode(path);
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats = inode.getData();
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype.stat = function (path, isLstat, cb) {
|
||||
var _this = this;
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
this._requestFileSizeAsync(path, function (e, size) {
|
||||
if (e) {
|
||||
return cb(e);
|
||||
}
|
||||
if (_this._logReads) {
|
||||
// Log the read
|
||||
_this.logRead(path, size);
|
||||
}
|
||||
stats.size = size;
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
cb(null, stats);
|
||||
}
|
||||
else {
|
||||
cb(api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path));
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype.statSync = function (path, isLstat) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
var size = this._requestFileSizeSync(path);
|
||||
if (this._logReads) {
|
||||
// Log the read
|
||||
this.logRead(path, size);
|
||||
}
|
||||
stats.size = size;
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path);
|
||||
}
|
||||
return stats;
|
||||
};
|
||||
BundledHTTPRequest.prototype.open = function (path, flags, mode, cb) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path));
|
||||
}
|
||||
var self = this;
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats_1 = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
return cb(api_error_1.ApiError.EEXIST(path));
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats_1.fileData) {
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), stats_1.fileData));
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
this._requestFileAsync(path, 'buffer', function (err, buffer) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
// we don't initially have file sizes
|
||||
stats_1.size = buffer.length;
|
||||
stats_1.fileData = buffer;
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), buffer));
|
||||
});
|
||||
break;
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.'));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(api_error_1.ApiError.EISDIR(path));
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype.openSync = function (path, flags, mode) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
|
||||
}
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
throw api_error_1.ApiError.EEXIST(path);
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats.fileData) {
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), stats.fileData);
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
var buffer = this._requestFileSync(path, 'buffer');
|
||||
// we don't initially have file sizes
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), buffer);
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.');
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype.readdir = function (path, cb) {
|
||||
try {
|
||||
cb(null, this.readdirSync(path));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype.readdirSync = function (path) {
|
||||
// Check if it exists.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
return inode.getListing();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.ENOTDIR(path);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
BundledHTTPRequest.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
var _this = this;
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err, arg) {
|
||||
fd.close(function (err2) {
|
||||
if (!err) {
|
||||
err = err2;
|
||||
}
|
||||
return oldCb(err, arg);
|
||||
});
|
||||
};
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (_this._logReads) {
|
||||
// Log the read
|
||||
_this.logRead(fname, fdBuff.toString());
|
||||
}
|
||||
if (encoding === null) {
|
||||
cb(err, (0, util_1.copyingSlice)(fdBuff));
|
||||
}
|
||||
else {
|
||||
tryToString(fdBuff, encoding, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
BundledHTTPRequest.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (this._logReads) {
|
||||
// Log the read
|
||||
this.logRead(fname, fdBuff.toString());
|
||||
}
|
||||
if (encoding === null) {
|
||||
return (0, util_1.copyingSlice)(fdBuff);
|
||||
}
|
||||
return fdBuff.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
BundledHTTPRequest.prototype._getHTTPPath = function (filePath) {
|
||||
if (filePath.charAt(0) === '/') {
|
||||
filePath = filePath.slice(1);
|
||||
}
|
||||
return this.prefixUrl + filePath;
|
||||
};
|
||||
BundledHTTPRequest.prototype._requestFileAsync = function (p, type, cb) {
|
||||
this._requestFileAsyncInternal(this._getHTTPPath(p), type, cb);
|
||||
};
|
||||
BundledHTTPRequest.prototype._requestFileSync = function (p, type) {
|
||||
return this._requestFileSyncInternal(this._getHTTPPath(p), type);
|
||||
};
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
BundledHTTPRequest.prototype._requestFileSizeAsync = function (path, cb) {
|
||||
this._requestFileSizeAsyncInternal(this._getHTTPPath(path), cb);
|
||||
};
|
||||
BundledHTTPRequest.prototype._requestFileSizeSync = function (path) {
|
||||
return this._requestFileSizeSyncInternal(this._getHTTPPath(path));
|
||||
};
|
||||
BundledHTTPRequest.Name = "BundledHTTPRequest";
|
||||
BundledHTTPRequest.Options = {
|
||||
index: {
|
||||
type: ["string", "object"],
|
||||
optional: true,
|
||||
description: "URL to a file index as a JSON file or the file index object itself, generated with the make_http_index script. Defaults to `index.json`."
|
||||
},
|
||||
bundle: {
|
||||
type: ["string", "object"],
|
||||
optional: true,
|
||||
description: "URL to a JSON file with the files preloaded."
|
||||
},
|
||||
baseUrl: {
|
||||
type: "string",
|
||||
optional: true,
|
||||
description: "Used as the URL prefix for fetched files. Default: Fetch files relative to the index."
|
||||
},
|
||||
preferXHR: {
|
||||
type: "boolean",
|
||||
optional: true,
|
||||
description: "Whether to prefer XmlHttpRequest or fetch for async operations if both are available. Default: false"
|
||||
},
|
||||
logReads: {
|
||||
type: "boolean",
|
||||
optional: true,
|
||||
description: "Whether to log all reads of files and put them in an object, this is useful for getting initial bundles that you can put in 'bundle' option. Values are put on `global.readFiles`. Default: false."
|
||||
}
|
||||
};
|
||||
return BundledHTTPRequest;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = BundledHTTPRequest;
|
||||
//# sourceMappingURL=BundledHTTPRequest.js.map
|
||||
56
sandpack-generated/static/browserfs11/node/backend/CodeSandboxEditorFS.d.ts
vendored
Normal file
56
sandpack-generated/static/browserfs11/node/backend/CodeSandboxEditorFS.d.ts
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
/// <reference types="node" />
|
||||
import { File } from '../core/file';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { BFSCallback, BFSOneArgCallback, FileSystem, FileSystemOptions, SynchronousFileSystem } from '../core/file_system';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
export interface IModule {
|
||||
path: string;
|
||||
updatedAt: string;
|
||||
insertedAt: string;
|
||||
}
|
||||
export type IFile = IModule & {
|
||||
code: string | undefined;
|
||||
savedCode: string | null;
|
||||
isBinary: boolean;
|
||||
type: 'file';
|
||||
};
|
||||
export type IDirectory = IModule & {
|
||||
type: 'directory';
|
||||
};
|
||||
export interface IManager {
|
||||
getSandboxFs: () => {
|
||||
[path: string]: IFile | IDirectory;
|
||||
};
|
||||
getJwt: () => string;
|
||||
}
|
||||
export interface ICodeSandboxFileSystemOptions {
|
||||
api: IManager;
|
||||
}
|
||||
export default class CodeSandboxEditorFS extends SynchronousFileSystem implements FileSystem {
|
||||
static readonly Name = "CodeSandboxEditorFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
static Create(options: ICodeSandboxFileSystemOptions, cb: BFSCallback<CodeSandboxEditorFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
private api;
|
||||
constructor(api: IManager);
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
empty(mainCb: BFSOneArgCallback): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
statSync(p: string, isLstate: boolean): Stats;
|
||||
createFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
writeFileSync(): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdirSync(p: string): void;
|
||||
unlinkSync(p: string): void;
|
||||
readdirSync(path: string): string[];
|
||||
_sync(p: string, data: Buffer, cb: BFSCallback<Stats>): void;
|
||||
_syncSync(p: string, data: Buffer): void;
|
||||
}
|
||||
@@ -0,0 +1,249 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var api_error_1 = require("../core/api_error");
|
||||
/* eslint-disable */
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
function blobToBuffer(blob, cb) {
|
||||
if (typeof Blob === 'undefined' || !(blob instanceof Blob)) {
|
||||
throw new Error('first argument must be a Blob');
|
||||
}
|
||||
if (typeof cb !== 'function') {
|
||||
throw new Error('second argument must be a function');
|
||||
}
|
||||
var reader = new FileReader();
|
||||
function onLoadEnd(e) {
|
||||
reader.removeEventListener('loadend', onLoadEnd, false);
|
||||
if (e.error) {
|
||||
cb(e.error);
|
||||
}
|
||||
else {
|
||||
// @ts-ignore
|
||||
cb(null, Buffer.from(reader.result));
|
||||
}
|
||||
}
|
||||
reader.addEventListener('loadend', onLoadEnd, false);
|
||||
reader.readAsArrayBuffer(blob);
|
||||
}
|
||||
function getCode(savedCode, code) {
|
||||
if (savedCode === null) {
|
||||
return code || '';
|
||||
}
|
||||
return savedCode || '';
|
||||
}
|
||||
var CodeSandboxFile = /** @class */ (function (_super) {
|
||||
__extends(CodeSandboxFile, _super);
|
||||
function CodeSandboxFile(_fs, _path, _flag, _stat, contents) {
|
||||
return _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
}
|
||||
CodeSandboxFile.prototype.sync = function (cb) {
|
||||
var _this = this;
|
||||
if (this.isDirty()) {
|
||||
var buffer = this.getBuffer();
|
||||
this._fs._sync(this.getPath(), buffer, function (e, stat) {
|
||||
if (!e) {
|
||||
_this.resetDirty();
|
||||
}
|
||||
cb(e);
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
};
|
||||
CodeSandboxFile.prototype.close = function (cb) {
|
||||
this.sync(cb);
|
||||
};
|
||||
CodeSandboxFile.prototype.syncSync = function () {
|
||||
if (this.isDirty()) {
|
||||
this._fs._syncSync(this.getPath(), this.getBuffer());
|
||||
this.resetDirty();
|
||||
}
|
||||
};
|
||||
CodeSandboxFile.prototype.closeSync = function () {
|
||||
this.syncSync();
|
||||
};
|
||||
return CodeSandboxFile;
|
||||
}(preload_file_1.default));
|
||||
var CodeSandboxEditorFS = /** @class */ (function (_super) {
|
||||
__extends(CodeSandboxEditorFS, _super);
|
||||
function CodeSandboxEditorFS(api) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this.api = api;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
CodeSandboxEditorFS.Create = function (options, cb) {
|
||||
cb(null, new CodeSandboxEditorFS(options.api));
|
||||
};
|
||||
CodeSandboxEditorFS.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.getName = function () {
|
||||
return 'CodeSandboxEditorFS';
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.empty = function (mainCb) {
|
||||
throw new Error('Empty not supported');
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.renameSync = function (oldPath, newPath) {
|
||||
throw new Error('Rename not supported');
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.statSync = function (p, isLstate) {
|
||||
var modules = this.api.getSandboxFs();
|
||||
var moduleInfo = modules[p];
|
||||
if (!moduleInfo) {
|
||||
var modulesStartingWithPath = Object.keys(modules).filter(function (pa) { return pa.startsWith(p.endsWith('/') ? p : p + '/') || pa === p; });
|
||||
if (modulesStartingWithPath.length > 0) {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 0);
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.ENOENT, p);
|
||||
}
|
||||
}
|
||||
if (moduleInfo.type === 'directory') {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
}
|
||||
else {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, getCode(moduleInfo.savedCode, moduleInfo.code).length, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
}
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.createFileSync = function (p, flag, mode) {
|
||||
throw new Error('Create file not supported');
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.open = function (p, flag, mode, cb) {
|
||||
var _this = this;
|
||||
var moduleInfo = this.api.getSandboxFs()[p];
|
||||
if (!moduleInfo) {
|
||||
cb(api_error_1.ApiError.ENOENT(p));
|
||||
return;
|
||||
}
|
||||
if (moduleInfo.type === 'directory') {
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
cb(null, new CodeSandboxFile(this, p, flag, stats));
|
||||
}
|
||||
else {
|
||||
var isBinary = moduleInfo.isBinary, savedCode = moduleInfo.savedCode, code = moduleInfo.code;
|
||||
if (isBinary) {
|
||||
var url = getCode(savedCode, code);
|
||||
var jwt = this.api.getJwt && this.api.getJwt();
|
||||
var sendAuth = jwt && new URL(url).origin === document.location.origin;
|
||||
var headers = sendAuth ? {
|
||||
Authorization: "Bearer ".concat(this.api.getJwt && this.api.getJwt())
|
||||
} : {};
|
||||
fetch(url, { headers: headers }).then(function (x) { return x.blob(); }).then(function (blob) {
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, blob.size, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
blobToBuffer(blob, function (err, r) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
return;
|
||||
}
|
||||
cb(undefined, new CodeSandboxFile(_this, p, flag, stats, r));
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
var buffer = Buffer.from(getCode(savedCode, code));
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, buffer.length, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
cb(null, new CodeSandboxFile(this, p, flag, stats, buffer));
|
||||
}
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.openFileSync = function (p, flag, mode) {
|
||||
var moduleInfo = this.api.getSandboxFs()[p];
|
||||
if (!moduleInfo) {
|
||||
throw api_error_1.ApiError.ENOENT(p);
|
||||
}
|
||||
if (moduleInfo.type === 'directory') {
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
return new CodeSandboxFile(this, p, flag, stats);
|
||||
}
|
||||
else {
|
||||
var savedCode = moduleInfo.savedCode, code = moduleInfo.code;
|
||||
var buffer = Buffer.from(getCode(savedCode, code));
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, buffer.length, undefined, +new Date(), +new Date(moduleInfo.updatedAt), +new Date(moduleInfo.insertedAt));
|
||||
return new CodeSandboxFile(this, p, flag, stats, buffer);
|
||||
}
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.writeFileSync = function () {
|
||||
// Stubbed
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.rmdirSync = function (p) {
|
||||
// Stubbed
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.mkdirSync = function (p) {
|
||||
// Stubbed
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.unlinkSync = function (p) {
|
||||
// Stubbed
|
||||
};
|
||||
CodeSandboxEditorFS.prototype.readdirSync = function (path) {
|
||||
var paths = Object.keys(this.api.getSandboxFs());
|
||||
var p = path.endsWith('/') ? path : path + '/';
|
||||
var pathsInDir = paths.filter(function (secondP) { return secondP.startsWith(p); });
|
||||
if (pathsInDir.length === 0) {
|
||||
return [];
|
||||
}
|
||||
var directChildren = new Set();
|
||||
var currentPathLength = p.split('/').length;
|
||||
pathsInDir
|
||||
.filter(function (np) { return np.split('/').length >= currentPathLength; })
|
||||
.forEach(function (np) {
|
||||
var parts = np.split('/');
|
||||
parts.length = currentPathLength;
|
||||
directChildren.add(parts.join('/'));
|
||||
});
|
||||
var pathArray = Array.from(directChildren).map(function (pa) { return pa.replace(p, ''); });
|
||||
return pathArray;
|
||||
};
|
||||
CodeSandboxEditorFS.prototype._sync = function (p, data, cb) {
|
||||
// Stubbed
|
||||
cb(null, undefined);
|
||||
};
|
||||
CodeSandboxEditorFS.prototype._syncSync = function (p, data) {
|
||||
// Stubbed
|
||||
};
|
||||
CodeSandboxEditorFS.Name = 'CodeSandboxEditorFS';
|
||||
CodeSandboxEditorFS.Options = {
|
||||
api: {
|
||||
type: 'object',
|
||||
description: 'The CodeSandbox Editor',
|
||||
validator: function (opt, cb) {
|
||||
if (opt) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Manager is invalid'));
|
||||
}
|
||||
},
|
||||
},
|
||||
};
|
||||
return CodeSandboxEditorFS;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = CodeSandboxEditorFS;
|
||||
//# sourceMappingURL=CodeSandboxEditorFS.js.map
|
||||
48
sandpack-generated/static/browserfs11/node/backend/CodeSandboxFS.d.ts
vendored
Normal file
48
sandpack-generated/static/browserfs11/node/backend/CodeSandboxFS.d.ts
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/// <reference types="node" />
|
||||
import { SynchronousFileSystem, FileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from "../core/file_system";
|
||||
import { File } from "../core/file";
|
||||
import { FileFlag } from "../core/file_flag";
|
||||
import { default as Stats } from "../core/node_fs_stats";
|
||||
export interface IModule {
|
||||
path?: string;
|
||||
code: string | undefined;
|
||||
}
|
||||
export interface IManager {
|
||||
getTranspiledModules: () => {
|
||||
[path: string]: {
|
||||
module: IModule;
|
||||
};
|
||||
};
|
||||
addModule(module: IModule): void;
|
||||
removeModule(module: IModule): void;
|
||||
moveModule(module: IModule, newPath: string): void;
|
||||
updateModule(module: IModule): void;
|
||||
}
|
||||
export interface ICodeSandboxFileSystemOptions {
|
||||
manager: IManager;
|
||||
}
|
||||
export default class CodeSandboxFS extends SynchronousFileSystem implements FileSystem {
|
||||
static readonly Name = "CodeSandboxFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
static Create(options: ICodeSandboxFileSystemOptions, cb: BFSCallback<CodeSandboxFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
private manager;
|
||||
constructor(manager: IManager);
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
empty(mainCb: BFSOneArgCallback): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
statSync(p: string, isLstate: boolean): Stats;
|
||||
createFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
openFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
rmdirSync(p: string): void;
|
||||
mkdirSync(p: string): void;
|
||||
readdirSync(path: string): string[];
|
||||
_sync(p: string, data: Buffer, cb: BFSCallback<Stats>): void;
|
||||
_syncSync(p: string, data: Buffer): void;
|
||||
}
|
||||
@@ -0,0 +1,220 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var path = require("path");
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var CodeSandboxFile = /** @class */ (function (_super) {
|
||||
__extends(CodeSandboxFile, _super);
|
||||
function CodeSandboxFile(_fs, _path, _flag, _stat, contents) {
|
||||
return _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
}
|
||||
CodeSandboxFile.prototype.sync = function (cb) {
|
||||
var _this = this;
|
||||
if (this.isDirty()) {
|
||||
var buffer = this.getBuffer();
|
||||
this._fs._sync(this.getPath(), buffer, function (e, stat) {
|
||||
if (!e) {
|
||||
_this.resetDirty();
|
||||
}
|
||||
cb(e);
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
};
|
||||
CodeSandboxFile.prototype.close = function (cb) {
|
||||
this.sync(cb);
|
||||
};
|
||||
CodeSandboxFile.prototype.syncSync = function () {
|
||||
if (this.isDirty()) {
|
||||
this._fs._syncSync(this.getPath(), this.getBuffer());
|
||||
this.resetDirty();
|
||||
}
|
||||
};
|
||||
CodeSandboxFile.prototype.closeSync = function () {
|
||||
this.syncSync();
|
||||
};
|
||||
return CodeSandboxFile;
|
||||
}(preload_file_1.default));
|
||||
var CodeSandboxFS = /** @class */ (function (_super) {
|
||||
__extends(CodeSandboxFS, _super);
|
||||
function CodeSandboxFS(manager) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this.manager = manager;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
CodeSandboxFS.Create = function (options, cb) {
|
||||
cb(null, new CodeSandboxFS(options.manager));
|
||||
};
|
||||
CodeSandboxFS.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
CodeSandboxFS.prototype.getName = function () {
|
||||
return "CodeSandboxFS";
|
||||
};
|
||||
CodeSandboxFS.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
CodeSandboxFS.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
CodeSandboxFS.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
CodeSandboxFS.prototype.empty = function (mainCb) {
|
||||
var _this = this;
|
||||
var tModules = this.manager.getTranspiledModules();
|
||||
Object.keys(tModules).forEach(function (pa) {
|
||||
_this.manager.removeModule(tModules[pa].module);
|
||||
});
|
||||
mainCb();
|
||||
};
|
||||
CodeSandboxFS.prototype.renameSync = function (oldPath, newPath) {
|
||||
var _this = this;
|
||||
var tModules = this.manager.getTranspiledModules();
|
||||
var modulesWithPath = Object.keys(tModules).filter(function (p) { return p.startsWith(oldPath) + "/" || p === oldPath; });
|
||||
if (modulesWithPath.length === 0) {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.ENOENT, oldPath);
|
||||
}
|
||||
modulesWithPath
|
||||
.map(function (p) { return ({ path: p, moduleInfo: tModules[p] }); })
|
||||
.forEach(function (_a) {
|
||||
var path = _a.path, moduleInfo = _a.moduleInfo;
|
||||
var module = moduleInfo.module;
|
||||
_this.manager.moveModule(module, path.replace(oldPath, newPath));
|
||||
});
|
||||
};
|
||||
CodeSandboxFS.prototype.statSync = function (p, isLstate) {
|
||||
var tModules = this.manager.getTranspiledModules();
|
||||
var moduleInfo = tModules[p];
|
||||
if (!moduleInfo) {
|
||||
var modulesStartingWithPath = Object.keys(tModules).filter(function (pa) { return pa.startsWith(p.endsWith("/") ? p : p + "/") || pa === p; });
|
||||
if (modulesStartingWithPath.length > 0) {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 0);
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.ENOENT, p);
|
||||
}
|
||||
}
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, Buffer.byteLength(moduleInfo.module.code || '', 'utf8'));
|
||||
return stats;
|
||||
};
|
||||
CodeSandboxFS.prototype.createFileSync = function (p, flag, mode) {
|
||||
if (p === "/") {
|
||||
throw api_error_1.ApiError.EEXIST(p);
|
||||
}
|
||||
if (this.manager.getTranspiledModules()[p]) {
|
||||
throw api_error_1.ApiError.EEXIST(p);
|
||||
}
|
||||
var module = {
|
||||
path: p,
|
||||
code: ""
|
||||
};
|
||||
this.manager.addModule(module);
|
||||
var buffer = Buffer.from(module.code || "");
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, buffer.length);
|
||||
return new CodeSandboxFile(this, p, flag, stats, buffer);
|
||||
};
|
||||
CodeSandboxFS.prototype.openFileSync = function (p, flag, mode) {
|
||||
var moduleInfo = this.manager.getTranspiledModules()[p];
|
||||
if (!moduleInfo) {
|
||||
throw api_error_1.ApiError.ENOENT(p);
|
||||
}
|
||||
var _a = moduleInfo.module.code, code = _a === void 0 ? "" : _a;
|
||||
var buffer = Buffer.from(code || "");
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, buffer.length);
|
||||
return new CodeSandboxFile(this, p, flag, stats, buffer);
|
||||
};
|
||||
CodeSandboxFS.prototype.rmdirSync = function (p) {
|
||||
var _this = this;
|
||||
var tModules = this.manager.getTranspiledModules();
|
||||
Object.keys(tModules)
|
||||
.filter(function (pa) { return pa.startsWith(p + "/") || p === pa; })
|
||||
.forEach(function (pa) {
|
||||
var module = tModules[pa].module;
|
||||
_this.manager.removeModule(module);
|
||||
});
|
||||
};
|
||||
CodeSandboxFS.prototype.mkdirSync = function (p) {
|
||||
// Like git, the CodeSandbox Manager has no concept of directories.
// For now we do nothing and pretend that every directory already exists.
|
||||
};
|
||||
CodeSandboxFS.prototype.readdirSync = function (path) {
|
||||
var paths = Object.keys(this.manager.getTranspiledModules());
|
||||
var p = path.endsWith("/") ? path : path + "/";
|
||||
var pathsInDir = paths.filter(function (secondP) { return secondP.startsWith(p); });
|
||||
if (pathsInDir.length === 0) {
|
||||
return [];
|
||||
}
|
||||
var directChildren = new Set();
|
||||
var currentPathLength = p.split("/").length;
|
||||
pathsInDir
|
||||
.filter(function (np) { return np.split("/").length >= currentPathLength; })
|
||||
.forEach(function (np) {
|
||||
var parts = np.split("/");
|
||||
parts.length = currentPathLength;
|
||||
directChildren.add(parts.join("/"));
|
||||
});
|
||||
var pathArray = Array.from(directChildren).map(function (pa) { return pa.replace(p, ""); });
|
||||
return pathArray;
|
||||
};
|
||||
CodeSandboxFS.prototype._sync = function (p, data, cb) {
|
||||
var _this = this;
|
||||
var parent = path.dirname(p);
|
||||
this.stat(parent, false, function (error, stat) {
|
||||
if (error) {
|
||||
cb(api_error_1.ApiError.FileError(api_error_1.ErrorCode.ENOENT, parent));
|
||||
}
|
||||
else {
|
||||
var module_1 = _this.manager.getTranspiledModules()[p].module;
|
||||
_this.manager.updateModule(module_1);
|
||||
cb(null);
|
||||
}
|
||||
});
|
||||
};
|
||||
CodeSandboxFS.prototype._syncSync = function (p, data) {
|
||||
var parent = path.dirname(p);
|
||||
this.statSync(parent, false);
|
||||
var module = this.manager.getTranspiledModules()[p].module;
|
||||
this.manager.updateModule(module);
|
||||
};
|
||||
CodeSandboxFS.Name = "CodeSandboxFS";
|
||||
CodeSandboxFS.Options = {
|
||||
manager: {
|
||||
type: "object",
|
||||
description: "The CodeSandbox Manager",
|
||||
validator: function (opt, cb) {
|
||||
if (opt) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Manager is invalid"));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
return CodeSandboxFS;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = CodeSandboxFS;
|
||||
//# sourceMappingURL=CodeSandboxFS.js.map
|
||||
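For orientation, here is a minimal usage sketch for the backend above. It assumes `CodeSandboxFS` is this module's default export and that `manager` is a sandbox manager object implementing the methods the backend calls (`getTranspiledModules`, `addModule`, `removeModule`, `moveModule`, `updateModule`); nothing below is part of the generated file.

```javascript
// Usage sketch; `manager` is an assumed CodeSandbox manager instance.
CodeSandboxFS.Create({ manager: manager }, function (e, csbFS) {
  if (e) { throw e; }
  BrowserFS.initialize(csbFS);
  var fs = BrowserFS.BFSRequire("fs");
  // Listings and stats are answered synchronously from the manager's
  // transpiled-module map; writes go through manager.addModule/updateModule.
  console.log(fs.readdirSync("/"));
});
```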
68
sandpack-generated/static/browserfs11/node/backend/Dropbox.d.ts
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
/// <reference types="node" />
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
import { BaseFileSystem, FileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { File } from '../core/file';
|
||||
export declare class DropboxFile extends PreloadFile<DropboxFileSystem> implements File {
|
||||
constructor(_fs: DropboxFileSystem, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
}
|
||||
/**
|
||||
* Options for the Dropbox file system.
|
||||
*/
|
||||
export interface DropboxFileSystemOptions {
|
||||
client: DropboxTypes.Dropbox;
|
||||
}
|
||||
/**
|
||||
* A read/write file system backed by Dropbox cloud storage.
|
||||
*
|
||||
* Uses the Dropbox V2 API, and the 2.x JS SDK.
|
||||
*/
|
||||
export default class DropboxFileSystem extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "DropboxV2";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates a new DropboxFileSystem instance with the given options.
|
||||
* Must be given an *authenticated* Dropbox client from 2.x JS SDK.
|
||||
*/
|
||||
static Create(opts: DropboxFileSystemOptions, cb: BFSCallback<DropboxFileSystem>): void;
|
||||
static isAvailable(): boolean;
|
||||
private _client;
|
||||
private constructor();
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsSymlinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Deletes *everything* in the file system. Mainly intended for unit testing!
|
||||
* @param mainCb Called when operation completes.
|
||||
*/
|
||||
empty(mainCb: BFSOneArgCallback): void;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
openFile(path: string, flags: FileFlag, cb: BFSCallback<File>): void;
|
||||
createFile(p: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
/**
|
||||
* Delete a file
|
||||
*/
|
||||
unlink(path: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Delete a directory
|
||||
*/
|
||||
rmdir(path: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Create a directory
|
||||
*/
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Get the names of the files in a directory
|
||||
*/
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
/**
|
||||
* (Internal) Syncs file to Dropbox.
|
||||
*/
|
||||
_syncFile(p: string, d: Buffer, cb: BFSOneArgCallback): void;
|
||||
}
|
||||
566
sandpack-generated/static/browserfs11/node/backend/Dropbox.js
Normal file
@@ -0,0 +1,566 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DropboxFile = void 0;
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var util_1 = require("../core/util");
|
||||
var dropbox_bridge_1 = require("dropbox_bridge");
|
||||
var setImmediate_1 = require("../generic/setImmediate");
|
||||
var path_1 = require("path");
|
||||
/**
|
||||
* Dropbox paths do not begin with a /, they just begin with a folder at the root node.
|
||||
* Here, we strip the `/`.
|
||||
* @param p An absolute path
|
||||
*/
|
||||
function FixPath(p) {
|
||||
if (p === '/') {
|
||||
return '';
|
||||
}
|
||||
else {
|
||||
return p;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* HACK: Dropbox errors are FUBAR'd sometimes.
|
||||
* @url https://github.com/dropbox/dropbox-sdk-js/issues/146
|
||||
* @param e
|
||||
*/
|
||||
function ExtractTheFuckingError(e) {
|
||||
var obj = e.error;
|
||||
if (obj['.tag']) {
|
||||
// Everything is OK.
|
||||
return obj;
|
||||
}
|
||||
else if (obj['error']) {
|
||||
// Terrible nested object bug.
|
||||
var obj2 = obj.error;
|
||||
if (obj2['.tag']) {
|
||||
return obj2;
|
||||
}
|
||||
else if (obj2['reason'] && obj2['reason']['.tag']) {
|
||||
return obj2.reason;
|
||||
}
|
||||
else {
|
||||
return obj2;
|
||||
}
|
||||
}
|
||||
else if (typeof (obj) === 'string') {
|
||||
// Might be a fucking JSON object error.
|
||||
try {
|
||||
var obj2 = JSON.parse(obj);
|
||||
if (obj2['error'] && obj2['error']['reason'] && obj2['error']['reason']['.tag']) {
|
||||
return obj2.error.reason;
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
// Nope. Give up.
|
||||
}
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
/**
|
||||
* Returns a user-facing error message given an error.
|
||||
*
|
||||
* HACK: Dropbox error messages sometimes lack a `user_message` field.
|
||||
* Sometimes, they are even strings. Ugh.
|
||||
* @url https://github.com/dropbox/dropbox-sdk-js/issues/146
|
||||
* @url https://github.com/dropbox/dropbox-sdk-js/issues/145
|
||||
* @url https://github.com/dropbox/dropbox-sdk-js/issues/144
|
||||
* @param err An error.
|
||||
*/
|
||||
function GetErrorMessage(err) {
|
||||
if (err['user_message']) {
|
||||
return err.user_message.text;
|
||||
}
|
||||
else if (err['error_summary']) {
|
||||
return err.error_summary;
|
||||
}
|
||||
else if (typeof (err.error) === "string") {
|
||||
return err.error;
|
||||
}
|
||||
else if (typeof (err.error) === "object") {
|
||||
// DROPBOX BUG: Sometimes, error is a nested error.
|
||||
return GetErrorMessage(err.error);
|
||||
}
|
||||
else {
|
||||
throw new Error("Dropbox's servers gave us a garbage error message: ".concat(JSON.stringify(err)));
|
||||
}
|
||||
}
|
||||
function LookupErrorToError(err, p, msg) {
|
||||
switch (err['.tag']) {
|
||||
case 'malformed_path':
|
||||
return new api_error_1.ApiError(api_error_1.ErrorCode.EBADF, msg, p);
|
||||
case 'not_found':
|
||||
return api_error_1.ApiError.ENOENT(p);
|
||||
case 'not_file':
|
||||
return api_error_1.ApiError.EISDIR(p);
|
||||
case 'not_folder':
|
||||
return api_error_1.ApiError.ENOTDIR(p);
|
||||
case 'restricted_content':
|
||||
return api_error_1.ApiError.EPERM(p);
|
||||
case 'other':
|
||||
default:
|
||||
return new api_error_1.ApiError(api_error_1.ErrorCode.EIO, msg, p);
|
||||
}
|
||||
}
|
||||
function WriteErrorToError(err, p, msg) {
|
||||
switch (err['.tag']) {
|
||||
case 'malformed_path':
|
||||
case 'disallowed_name':
|
||||
return new api_error_1.ApiError(api_error_1.ErrorCode.EBADF, msg, p);
|
||||
case 'conflict':
|
||||
case 'no_write_permission':
|
||||
case 'team_folder':
|
||||
return api_error_1.ApiError.EPERM(p);
|
||||
case 'insufficient_space':
|
||||
return new api_error_1.ApiError(api_error_1.ErrorCode.ENOSPC, msg);
|
||||
case 'other':
|
||||
default:
|
||||
return new api_error_1.ApiError(api_error_1.ErrorCode.EIO, msg, p);
|
||||
}
|
||||
}
|
||||
function FilesDeleteWrapped(client, p, cb) {
|
||||
var arg = {
|
||||
path: FixPath(p)
|
||||
};
|
||||
client.filesDeleteV2(arg)
|
||||
.then(function () {
|
||||
cb();
|
||||
}).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'path_lookup':
|
||||
cb(LookupErrorToError(err.path_lookup, p, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'path_write':
|
||||
cb(WriteErrorToError(err.path_write, p, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'too_many_write_operations':
|
||||
setTimeout(function () { return FilesDeleteWrapped(client, p, cb); }, 500 + (300 * (Math.random())));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), p));
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
var DropboxFile = /** @class */ (function (_super) {
|
||||
__extends(DropboxFile, _super);
|
||||
function DropboxFile(_fs, _path, _flag, _stat, contents) {
|
||||
return _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
}
|
||||
DropboxFile.prototype.sync = function (cb) {
|
||||
this._fs._syncFile(this.getPath(), this.getBuffer(), cb);
|
||||
};
|
||||
DropboxFile.prototype.close = function (cb) {
|
||||
this.sync(cb);
|
||||
};
|
||||
return DropboxFile;
|
||||
}(preload_file_1.default));
|
||||
exports.DropboxFile = DropboxFile;
|
||||
/**
|
||||
* A read/write file system backed by Dropbox cloud storage.
|
||||
*
|
||||
* Uses the Dropbox V2 API, and the 2.x JS SDK.
|
||||
*/
|
||||
var DropboxFileSystem = /** @class */ (function (_super) {
|
||||
__extends(DropboxFileSystem, _super);
|
||||
function DropboxFileSystem(client) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._client = client;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates a new DropboxFileSystem instance with the given options.
|
||||
* Must be given an *authenticated* Dropbox client from 2.x JS SDK.
|
||||
*/
|
||||
DropboxFileSystem.Create = function (opts, cb) {
|
||||
cb(null, new DropboxFileSystem(opts.client));
|
||||
};
|
||||
DropboxFileSystem.isAvailable = function () {
|
||||
// Checks if the Dropbox library is loaded.
|
||||
return typeof dropbox_bridge_1.Dropbox !== 'undefined';
|
||||
};
|
||||
DropboxFileSystem.prototype.getName = function () {
|
||||
return DropboxFileSystem.Name;
|
||||
};
|
||||
DropboxFileSystem.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
// Dropbox doesn't support symlinks, properties, or synchronous calls
|
||||
// TODO: does it???
|
||||
DropboxFileSystem.prototype.supportsSymlinks = function () {
|
||||
return false;
|
||||
};
|
||||
DropboxFileSystem.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
DropboxFileSystem.prototype.supportsSynch = function () {
|
||||
return false;
|
||||
};
|
||||
/**
|
||||
* Deletes *everything* in the file system. Mainly intended for unit testing!
|
||||
* @param mainCb Called when operation completes.
|
||||
*/
|
||||
DropboxFileSystem.prototype.empty = function (mainCb) {
|
||||
var _this = this;
|
||||
this.readdir('/', function (e, paths) {
|
||||
if (paths) {
|
||||
var next_1 = function (e) {
|
||||
if (paths.length === 0) {
|
||||
mainCb();
|
||||
}
|
||||
else {
|
||||
FilesDeleteWrapped(_this._client, paths.shift(), next_1);
|
||||
}
|
||||
};
|
||||
next_1();
|
||||
}
|
||||
else {
|
||||
mainCb(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
DropboxFileSystem.prototype.rename = function (oldPath, newPath, cb) {
|
||||
var _this = this;
|
||||
// Dropbox doesn't let you rename things over existing things, but POSIX does.
|
||||
// So, we need to see if newPath exists...
|
||||
this.stat(newPath, false, function (e, stats) {
|
||||
var rename = function () {
|
||||
var relocationArg = {
|
||||
from_path: FixPath(oldPath),
|
||||
to_path: FixPath(newPath)
|
||||
};
|
||||
_this._client.filesMoveV2(relocationArg)
|
||||
.then(function () { return cb(); })
|
||||
.catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'from_lookup':
|
||||
cb(LookupErrorToError(err.from_lookup, oldPath, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'from_write':
|
||||
cb(WriteErrorToError(err.from_write, oldPath, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'to':
|
||||
cb(WriteErrorToError(err.to, newPath, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'cant_copy_shared_folder':
|
||||
case 'cant_nest_shared_folder':
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, GetErrorMessage(e), oldPath));
|
||||
break;
|
||||
case 'cant_move_folder_into_itself':
|
||||
case 'duplicated_or_nested_paths':
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EBADF, GetErrorMessage(e), oldPath));
|
||||
break;
|
||||
case 'too_many_files':
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOSPC, GetErrorMessage(e), oldPath));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), oldPath));
|
||||
break;
|
||||
}
|
||||
});
|
||||
};
|
||||
if (e) {
|
||||
// Doesn't exist. Proceed!
|
||||
rename();
|
||||
}
|
||||
else if (oldPath === newPath) {
|
||||
// NOP if the path exists. Error if it doesn't exist.
|
||||
if (e) {
|
||||
cb(api_error_1.ApiError.ENOENT(newPath));
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
}
|
||||
else if (stats && stats.isDirectory()) {
|
||||
// Exists, is a directory. Cannot rename over an existing directory.
|
||||
cb(api_error_1.ApiError.EISDIR(newPath));
|
||||
}
|
||||
else {
|
||||
// Exists, is a file, and differs from oldPath. Delete and rename.
|
||||
_this.unlink(newPath, function (e) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
rename();
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
DropboxFileSystem.prototype.stat = function (path, isLstat, cb) {
|
||||
if (path === '/') {
|
||||
// Dropbox doesn't support querying the root directory.
|
||||
(0, setImmediate_1.default)(function () {
|
||||
cb(null, new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096));
|
||||
});
|
||||
return;
|
||||
}
|
||||
var arg = {
|
||||
path: FixPath(path)
|
||||
};
|
||||
this._client.filesGetMetadata(arg).then(function (ref) {
|
||||
switch (ref['.tag']) {
|
||||
case 'file':
|
||||
var fileMetadata = ref;
|
||||
// TODO: Parse time fields.
|
||||
cb(null, new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, fileMetadata.size));
|
||||
break;
|
||||
case 'folder':
|
||||
cb(null, new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096));
|
||||
break;
|
||||
case 'deleted':
|
||||
cb(api_error_1.ApiError.ENOENT(path));
|
||||
break;
|
||||
default:
|
||||
// Unknown.
|
||||
break;
|
||||
}
|
||||
}).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'path':
|
||||
cb(LookupErrorToError(err.path, path, GetErrorMessage(e)));
|
||||
break;
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), path));
|
||||
break;
|
||||
}
|
||||
});
|
||||
};
|
||||
DropboxFileSystem.prototype.openFile = function (path, flags, cb) {
|
||||
var _this = this;
|
||||
var downloadArg = {
|
||||
path: FixPath(path)
|
||||
};
|
||||
this._client.filesDownload(downloadArg).then(function (res) {
|
||||
var b = res.fileBlob;
|
||||
var fr = new FileReader();
|
||||
fr.onload = function () {
|
||||
var ab = fr.result;
|
||||
cb(null, new DropboxFile(_this, path, flags, new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, ab.byteLength), (0, util_1.arrayBuffer2Buffer)(ab)));
|
||||
};
|
||||
fr.readAsArrayBuffer(b);
|
||||
}).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'path':
|
||||
var dpError = err;
|
||||
cb(LookupErrorToError(dpError.path, path, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), path));
|
||||
break;
|
||||
}
|
||||
});
|
||||
};
|
||||
DropboxFileSystem.prototype.createFile = function (p, flags, mode, cb) {
|
||||
var _this = this;
|
||||
var fileData = Buffer.alloc(0);
|
||||
var blob = new Blob([(0, util_1.buffer2ArrayBuffer)(fileData)], { type: "octet/stream" });
|
||||
var commitInfo = {
|
||||
contents: blob,
|
||||
path: FixPath(p)
|
||||
};
|
||||
this._client.filesUpload(commitInfo).then(function (metadata) {
|
||||
cb(null, new DropboxFile(_this, p, flags, new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, 0), fileData));
|
||||
}).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
// HACK: Casting to 'any' since tag can be 'too_many_write_operations'.
|
||||
switch (err['.tag']) {
|
||||
case 'path':
|
||||
var upError = err;
|
||||
cb(WriteErrorToError(upError.path.reason, p, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'too_many_write_operations':
|
||||
// Retry in (500, 800) ms.
|
||||
setTimeout(function () { return _this.createFile(p, flags, mode, cb); }, 500 + (300 * (Math.random())));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), p));
|
||||
break;
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Delete a file
|
||||
*/
|
||||
DropboxFileSystem.prototype.unlink = function (path, cb) {
|
||||
var _this = this;
|
||||
// Must be a file. Check first.
|
||||
this.stat(path, false, function (e, stat) {
|
||||
if (stat) {
|
||||
if (stat.isDirectory()) {
|
||||
cb(api_error_1.ApiError.EISDIR(path));
|
||||
}
|
||||
else {
|
||||
FilesDeleteWrapped(_this._client, path, cb);
|
||||
}
|
||||
}
|
||||
else {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Delete a directory
|
||||
*/
|
||||
DropboxFileSystem.prototype.rmdir = function (path, cb) {
|
||||
var _this = this;
|
||||
this.readdir(path, function (e, paths) {
|
||||
if (paths) {
|
||||
if (paths.length > 0) {
|
||||
cb(api_error_1.ApiError.ENOTEMPTY(path));
|
||||
}
|
||||
else {
|
||||
FilesDeleteWrapped(_this._client, path, cb);
|
||||
}
|
||||
}
|
||||
else {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Create a directory
|
||||
*/
|
||||
DropboxFileSystem.prototype.mkdir = function (p, mode, cb) {
|
||||
var _this = this;
|
||||
// Dropbox's create_folder is recursive. Check if parent exists.
|
||||
var parent = (0, path_1.dirname)(p);
|
||||
this.stat(parent, false, function (e, stats) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else if (stats && !stats.isDirectory()) {
|
||||
cb(api_error_1.ApiError.ENOTDIR(parent));
|
||||
}
|
||||
else {
|
||||
var arg = {
|
||||
path: FixPath(p)
|
||||
};
|
||||
_this._client.filesCreateFolderV2(arg).then(function () { return cb(); }).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
if (err['.tag'] === "too_many_write_operations") {
|
||||
// Retry in a bit.
|
||||
setTimeout(function () { return _this.mkdir(p, mode, cb); }, 500 + (300 * (Math.random())));
|
||||
}
|
||||
else {
|
||||
cb(WriteErrorToError(ExtractTheFuckingError(e).path, p, GetErrorMessage(e)));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Get the names of the files in a directory
|
||||
*/
|
||||
DropboxFileSystem.prototype.readdir = function (path, cb) {
|
||||
var _this = this;
|
||||
var arg = {
|
||||
path: FixPath(path)
|
||||
};
|
||||
this._client.filesListFolder(arg).then(function (res) {
|
||||
ContinueReadingDir(_this._client, path, res, [], cb);
|
||||
}).catch(function (e) {
|
||||
ProcessListFolderError(e, path, cb);
|
||||
});
|
||||
};
|
||||
/**
|
||||
* (Internal) Syncs file to Dropbox.
|
||||
*/
|
||||
DropboxFileSystem.prototype._syncFile = function (p, d, cb) {
|
||||
var _this = this;
|
||||
var blob = new Blob([(0, util_1.buffer2ArrayBuffer)(d)], { type: "octet/stream" });
|
||||
var arg = {
|
||||
contents: blob,
|
||||
path: FixPath(p),
|
||||
mode: {
|
||||
'.tag': 'overwrite'
|
||||
}
|
||||
};
|
||||
this._client.filesUpload(arg).then(function () {
|
||||
cb();
|
||||
}).catch(function (e) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'path':
|
||||
var upError = err;
|
||||
cb(WriteErrorToError(upError.path.reason, p, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'too_many_write_operations':
|
||||
setTimeout(function () { return _this._syncFile(p, d, cb); }, 500 + (300 * (Math.random())));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), p));
|
||||
break;
|
||||
}
|
||||
});
|
||||
};
|
||||
DropboxFileSystem.Name = "DropboxV2";
|
||||
DropboxFileSystem.Options = {
|
||||
client: {
|
||||
type: "object",
|
||||
description: "An *authenticated* Dropbox client. Must be from the 2.5.x JS SDK."
|
||||
}
|
||||
};
|
||||
return DropboxFileSystem;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = DropboxFileSystem;
|
||||
function ProcessListFolderError(e, path, cb) {
|
||||
var err = ExtractTheFuckingError(e);
|
||||
switch (err['.tag']) {
|
||||
case 'path':
|
||||
var pathError = err;
|
||||
cb(LookupErrorToError(pathError.path, path, GetErrorMessage(e)));
|
||||
break;
|
||||
case 'other':
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, GetErrorMessage(e), path));
|
||||
break;
|
||||
}
|
||||
}
|
||||
function ContinueReadingDir(client, path, res, previousEntries, cb) {
|
||||
var newEntries = res.entries.map(function (e) { return e.path_display; }).filter(Boolean);
|
||||
var entries = previousEntries.concat(newEntries);
|
||||
if (!res.has_more) {
|
||||
cb(null, entries);
|
||||
}
|
||||
else {
|
||||
var arg = {
|
||||
cursor: res.cursor
|
||||
};
|
||||
client.filesListFolderContinue(arg).then(function (res) {
|
||||
ContinueReadingDir(client, path, res, entries, cb);
|
||||
}).catch(function (e) {
|
||||
ProcessListFolderError(e, path, cb);
|
||||
});
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=Dropbox.js.map
|
||||
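A hedged usage sketch for the Dropbox backend above. It assumes `DropboxFileSystem` is this module's default export and that `client` is an already-authenticated Dropbox 2.x JS SDK instance (how the client is constructed depends on the SDK and is not shown here); paths and file names are illustrative.

```javascript
// Usage sketch; `client` is an assumed, authenticated Dropbox 2.x SDK client.
DropboxFileSystem.Create({ client: client }, function (e, dropboxFS) {
  if (e) { throw e; }
  BrowserFS.initialize(dropboxFS);
  var fs = BrowserFS.BFSRequire("fs");
  // Every call is asynchronous; `too_many_write_operations` responses are
  // retried by the wrappers above with a randomized back-off.
  fs.writeFile("/notes.txt", "ship it", function (err) {
    if (err) { throw err; }
    fs.readdir("/", function (err2, entries) { console.log(err2 || entries); });
  });
});
```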
85
sandpack-generated/static/browserfs11/node/backend/DynamicHTTPRequest.d.ts
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
/**
|
||||
* Configuration options for a DynamicHTTPRequest file system.
|
||||
*/
|
||||
export interface DynamicHTTPRequestOptions {
|
||||
index?: string | object;
|
||||
baseUrl?: string;
|
||||
preferXHR?: boolean;
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
export default class DynamicHTTPRequest extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "DynamicHTTPRequest";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Construct a DynamicHTTPRequest file system backend with the given options.
|
||||
*/
|
||||
static Create(opts: DynamicHTTPRequestOptions, cb: BFSCallback<DynamicHTTPRequest>): void;
|
||||
static isAvailable(): boolean;
|
||||
readonly prefixUrl: string;
|
||||
private _requestFileAsyncInternal;
|
||||
private _requestFileSyncInternal;
|
||||
private constructor();
|
||||
private convertAPIError;
|
||||
empty(): void;
|
||||
getName(): string;
|
||||
diskSpace(path: string, cb: (total: number, free: number) => void): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(path: string, isLstat: boolean): Stats;
|
||||
open(path: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(path: string, flags: FileFlag, mode: number): File;
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
private _getHTTPPath;
|
||||
/**
|
||||
* Asynchronously download the given file.
|
||||
*/
|
||||
private _requestFileAsync;
|
||||
/**
|
||||
* Synchronously download the given file.
|
||||
*/
|
||||
private _requestFileSync;
|
||||
}
|
||||
@@ -0,0 +1,273 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var util_1 = require("../core/util");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var xhr_1 = require("../generic/xhr");
|
||||
var fetch_1 = require("../generic/fetch");
|
||||
/**
|
||||
* Try to convert the given buffer into a string, and pass it to the callback.
|
||||
* Optimization that removes the needed try/catch into a helper function, as
|
||||
* this is an uncommon case.
|
||||
* @hidden
|
||||
*/
|
||||
function tryToString(buff, encoding, cb) {
|
||||
try {
|
||||
cb(null, buff.toString(encoding));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
}
|
||||
function syncNotAvailableError() {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP, "Synchronous HTTP download methods are not available in this environment.");
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
var DynamicHTTPRequest = /** @class */ (function (_super) {
|
||||
__extends(DynamicHTTPRequest, _super);
|
||||
// private _requestFileSizeSyncInternal: (p: string) => number;
|
||||
function DynamicHTTPRequest(prefixUrl, preferXHR) {
|
||||
if (prefixUrl === void 0) { prefixUrl = ''; }
|
||||
if (preferXHR === void 0) { preferXHR = false; }
|
||||
var _this = _super.call(this) || this;
|
||||
// prefix_url must end in a directory separator.
|
||||
if (prefixUrl.length > 0 && prefixUrl.charAt(prefixUrl.length - 1) !== '/') {
|
||||
prefixUrl = prefixUrl + '/';
|
||||
}
|
||||
_this.prefixUrl = prefixUrl;
|
||||
if (fetch_1.fetchIsAvailable && (!preferXHR || !xhr_1.xhrIsAvailable)) {
|
||||
_this._requestFileAsyncInternal = fetch_1.fetchFileAsync;
|
||||
// this._requestFileSizeAsyncInternal = fetchFileSizeAsync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileAsyncInternal = xhr_1.asyncDownloadFile;
|
||||
// this._requestFileSizeAsyncInternal = getFileSizeAsync;
|
||||
}
|
||||
if (xhr_1.xhrIsAvailable) {
|
||||
_this._requestFileSyncInternal = xhr_1.syncDownloadFile;
|
||||
// this._requestFileSizeSyncInternal = getFileSizeSync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileSyncInternal = syncNotAvailableError;
|
||||
// this._requestFileSizeSyncInternal = syncNotAvailableError;
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Construct a DynamicHTTPRequest file system backend with the given options.
|
||||
*/
|
||||
DynamicHTTPRequest.Create = function (opts, cb) {
|
||||
cb(null, new DynamicHTTPRequest(opts.baseUrl));
|
||||
};
|
||||
DynamicHTTPRequest.isAvailable = function () {
|
||||
return xhr_1.xhrIsAvailable || fetch_1.fetchIsAvailable;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.convertAPIError = function (error) {
|
||||
return new api_error_1.ApiError(error.errno, error.message, error.path);
|
||||
};
|
||||
DynamicHTTPRequest.prototype.empty = function () {
|
||||
// this._index.fileIterator(function(file: Stats) {
|
||||
// file.fileData = null;
|
||||
// });
|
||||
};
|
||||
DynamicHTTPRequest.prototype.getName = function () {
|
||||
return DynamicHTTPRequest.Name;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.diskSpace = function (path, cb) {
|
||||
// Read-only file system. We could calculate the total space, but that's not
|
||||
// important right now.
|
||||
cb(0, 0);
|
||||
};
|
||||
DynamicHTTPRequest.prototype.isReadOnly = function () {
|
||||
return true;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.supportsSynch = function () {
|
||||
// Synchronous operations are only available via the XHR interface for now.
|
||||
return xhr_1.xhrIsAvailable;
|
||||
};
|
||||
DynamicHTTPRequest.prototype.stat = function (path, isLstat, cb) {
|
||||
var _this = this;
|
||||
this._requestFileAsync(path + '?stat', 'json', function (err, data) {
|
||||
if (err || data.error) {
|
||||
cb(err || _this.convertAPIError(data.error));
|
||||
}
|
||||
else {
|
||||
cb(null, node_fs_stats_1.default.fromBuffer(Buffer.from(data.stats)));
|
||||
}
|
||||
});
|
||||
};
|
||||
DynamicHTTPRequest.prototype.statSync = function (path, isLstat) {
|
||||
var data = this._requestFileSync(path + '?stat', 'json');
|
||||
if (data.error) {
|
||||
throw this.convertAPIError(data.error);
|
||||
}
|
||||
return node_fs_stats_1.default.fromBuffer(Buffer.from(data.stats));
|
||||
};
|
||||
DynamicHTTPRequest.prototype.open = function (path, flags, mode, cb) {
|
||||
var _this = this;
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path));
|
||||
}
|
||||
var self = this;
|
||||
this._requestFileAsync(path, 'json', function (err, data) {
|
||||
if (err || data.error) {
|
||||
return cb(err || _this.convertAPIError(data.error));
|
||||
}
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.fromBuffer(Buffer.from(data.stats)), Buffer.from(data.result)));
|
||||
});
|
||||
};
|
||||
DynamicHTTPRequest.prototype.openSync = function (path, flags, mode) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
|
||||
}
|
||||
var self = this;
|
||||
var data = this._requestFileSync(path, 'json');
|
||||
if (data.error) {
|
||||
throw this.convertAPIError(data.error);
|
||||
}
|
||||
return new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.fromBuffer(Buffer.from(data.stats)), Buffer.from(data.result));
|
||||
};
|
||||
DynamicHTTPRequest.prototype.readdir = function (path, cb) {
|
||||
try {
|
||||
cb(null, this.readdirSync(path));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
DynamicHTTPRequest.prototype.readdirSync = function (path) {
|
||||
// Check if it exists.
|
||||
var data = this._requestFileSync(path + '?meta', 'json');
|
||||
if (data.error) {
|
||||
throw this.convertAPIError(data.error);
|
||||
}
|
||||
return data.result;
|
||||
};
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
DynamicHTTPRequest.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err, arg) {
|
||||
fd.close(function (err2) {
|
||||
if (!err) {
|
||||
err = err2;
|
||||
}
|
||||
return oldCb(err, arg);
|
||||
});
|
||||
};
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
cb(err, (0, util_1.copyingSlice)(fdBuff));
|
||||
}
|
||||
else {
|
||||
tryToString(fdBuff, encoding, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
DynamicHTTPRequest.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
return (0, util_1.copyingSlice)(fdBuff);
|
||||
}
|
||||
return fdBuff.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
DynamicHTTPRequest.prototype._getHTTPPath = function (filePath) {
|
||||
if (filePath.charAt(0) === '/') {
|
||||
filePath = filePath.slice(1);
|
||||
}
|
||||
return this.prefixUrl + filePath;
|
||||
};
|
||||
DynamicHTTPRequest.prototype._requestFileAsync = function (p, type, cb) {
|
||||
this._requestFileAsyncInternal(this._getHTTPPath(p), type, cb);
|
||||
};
|
||||
DynamicHTTPRequest.prototype._requestFileSync = function (p, type) {
|
||||
return this._requestFileSyncInternal(this._getHTTPPath(p), type);
|
||||
};
|
||||
DynamicHTTPRequest.Name = "DynamicHTTPRequest";
|
||||
DynamicHTTPRequest.Options = {
|
||||
baseUrl: {
|
||||
type: "string",
|
||||
optional: true,
|
||||
description: "Used as the URL prefix for fetched files. Default: Fetch files relative to the index."
|
||||
},
|
||||
preferXHR: {
|
||||
type: "boolean",
|
||||
optional: true,
|
||||
description: "Whether to prefer XmlHttpRequest or fetch for async operations if both are available. Default: false"
|
||||
}
|
||||
};
|
||||
return DynamicHTTPRequest;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = DynamicHTTPRequest;
|
||||
//# sourceMappingURL=DynamicHTTPRequest.js.map
|
||||
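A usage sketch for the DynamicHTTPRequest backend above. It assumes a server mounted at `baseUrl` that answers the JSON protocol the code relies on: `<path>?stat` returns `{ stats }`, `<path>?meta` returns `{ result }` with directory entries, a plain `<path>` returns `{ stats, result }` with the file contents, and failures return `{ error }`. The URL and file name are illustrative.

```javascript
// Usage sketch; "/sandbox-fs/" is an assumed endpoint implementing the ?stat/?meta protocol.
DynamicHTTPRequest.Create({ baseUrl: "/sandbox-fs/" }, function (e, httpFS) {
  if (e) { throw e; }
  BrowserFS.initialize(httpFS);
  var fs = BrowserFS.BFSRequire("fs");
  // The backend is read-only; opening a path with a writeable flag fails with EPERM.
  fs.readFile("/index.js", "utf8", function (err, contents) {
    console.log(err || contents);
  });
});
```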
72
sandpack-generated/static/browserfs11/node/backend/Emscripten.d.ts
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
/// <reference types="node" />
|
||||
import { SynchronousFileSystem, BFSOneArgCallback, BFSCallback, BFSThreeArgCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { BaseFile, File } from '../core/file';
|
||||
export declare class EmscriptenFile extends BaseFile implements File {
|
||||
private _fs;
|
||||
private _FS;
|
||||
private _path;
|
||||
private _stream;
|
||||
constructor(_fs: EmscriptenFileSystem, _FS: any, _path: string, _stream: any);
|
||||
getPos(): number | undefined;
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
closeSync(): void;
|
||||
stat(cb: BFSCallback<Stats>): void;
|
||||
statSync(): Stats;
|
||||
truncate(len: number, cb: BFSOneArgCallback): void;
|
||||
truncateSync(len: number): void;
|
||||
write(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
writeSync(buffer: Buffer, offset: number, length: number, position: number | null): number;
|
||||
read(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
readSync(buffer: Buffer, offset: number, length: number, position: number | null): number;
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
syncSync(): void;
|
||||
chown(uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
chownSync(uid: number, gid: number): void;
|
||||
chmod(mode: number, cb: BFSOneArgCallback): void;
|
||||
chmodSync(mode: number): void;
|
||||
utimes(atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
utimesSync(atime: Date, mtime: Date): void;
|
||||
}
|
||||
/**
|
||||
* Configuration options for Emscripten file systems.
|
||||
*/
|
||||
export interface EmscriptenFileSystemOptions {
|
||||
FS: any;
|
||||
}
|
||||
/**
|
||||
* Mounts an Emscripten file system into the BrowserFS file system.
|
||||
*/
|
||||
export default class EmscriptenFileSystem extends SynchronousFileSystem {
|
||||
static readonly Name = "EmscriptenFileSystem";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Create an EmscriptenFileSystem instance with the given options.
|
||||
*/
|
||||
static Create(opts: EmscriptenFileSystemOptions, cb: BFSCallback<EmscriptenFileSystem>): void;
|
||||
static isAvailable(): boolean;
|
||||
private _FS;
|
||||
private constructor();
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
statSync(p: string, isLstat: boolean): Stats;
|
||||
openSync(p: string, flag: FileFlag, mode: number): EmscriptenFile;
|
||||
unlinkSync(p: string): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdirSync(p: string): string[];
|
||||
truncateSync(p: string, len: number): void;
|
||||
readFileSync(p: string, encoding: string, flag: FileFlag): any;
|
||||
writeFileSync(p: string, data: any, encoding: string, flag: FileFlag, mode: number): void;
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
symlinkSync(srcpath: string, dstpath: string, type: string): void;
|
||||
readlinkSync(p: string): string;
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
private modeToFileType;
|
||||
}
|
||||
404
sandpack-generated/static/browserfs11/node/backend/Emscripten.js
Normal file
@@ -0,0 +1,404 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.EmscriptenFile = void 0;
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var file_1 = require("../core/file");
|
||||
var util_1 = require("../core/util");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function convertError(e, path) {
|
||||
if (path === void 0) { path = ''; }
|
||||
var errno = e.errno;
|
||||
var parent = e.node;
|
||||
var paths = [];
|
||||
while (parent) {
|
||||
paths.unshift(parent.name);
|
||||
if (parent === parent.parent) {
|
||||
break;
|
||||
}
|
||||
parent = parent.parent;
|
||||
}
|
||||
return new api_error_1.ApiError(errno, api_error_1.ErrorStrings[errno], paths.length > 0 ? '/' + paths.join('/') : path);
|
||||
}
|
||||
var EmscriptenFile = /** @class */ (function (_super) {
|
||||
__extends(EmscriptenFile, _super);
|
||||
function EmscriptenFile(_fs, _FS, _path, _stream) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._fs = _fs;
|
||||
_this._FS = _FS;
|
||||
_this._path = _path;
|
||||
_this._stream = _stream;
|
||||
return _this;
|
||||
}
|
||||
EmscriptenFile.prototype.getPos = function () {
|
||||
return undefined;
|
||||
};
|
||||
EmscriptenFile.prototype.close = function (cb) {
|
||||
var err = null;
|
||||
try {
|
||||
this.closeSync();
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.closeSync = function () {
|
||||
try {
|
||||
this._FS.close(this._stream);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.stat = function (cb) {
|
||||
try {
|
||||
cb(null, this.statSync());
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.statSync = function () {
|
||||
try {
|
||||
return this._fs.statSync(this._path, false);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.truncate = function (len, cb) {
|
||||
var err = null;
|
||||
try {
|
||||
this.truncateSync(len);
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.truncateSync = function (len) {
|
||||
try {
|
||||
this._FS.ftruncate(this._stream.fd, len);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.write = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.writeSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.writeSync = function (buffer, offset, length, position) {
|
||||
try {
|
||||
var u8 = (0, util_1.buffer2Uint8array)(buffer);
|
||||
// Emscripten is particular about what position is set to.
|
||||
var emPosition = position === null ? undefined : position;
|
||||
return this._FS.write(this._stream, u8, offset, length, emPosition);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.read = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.readSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.readSync = function (buffer, offset, length, position) {
|
||||
try {
|
||||
var u8 = (0, util_1.buffer2Uint8array)(buffer);
|
||||
// Emscripten is particular about what position is set to.
|
||||
var emPosition = position === null ? undefined : position;
|
||||
return this._FS.read(this._stream, u8, offset, length, emPosition);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.sync = function (cb) {
|
||||
// NOP.
|
||||
cb();
|
||||
};
|
||||
EmscriptenFile.prototype.syncSync = function () {
|
||||
// NOP.
|
||||
};
|
||||
EmscriptenFile.prototype.chown = function (uid, gid, cb) {
|
||||
var err = null;
|
||||
try {
|
||||
this.chownSync(uid, gid);
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.chownSync = function (uid, gid) {
|
||||
try {
|
||||
this._FS.fchown(this._stream.fd, uid, gid);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.chmod = function (mode, cb) {
|
||||
var err = null;
|
||||
try {
|
||||
this.chmodSync(mode);
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.chmodSync = function (mode) {
|
||||
try {
|
||||
this._FS.fchmod(this._stream.fd, mode);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, this._path);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.utimes = function (atime, mtime, cb) {
|
||||
var err = null;
|
||||
try {
|
||||
this.utimesSync(atime, mtime);
|
||||
}
|
||||
catch (e) {
|
||||
err = e;
|
||||
}
|
||||
finally {
|
||||
cb(err);
|
||||
}
|
||||
};
|
||||
EmscriptenFile.prototype.utimesSync = function (atime, mtime) {
|
||||
this._fs.utimesSync(this._path, atime, mtime);
|
||||
};
|
||||
return EmscriptenFile;
|
||||
}(file_1.BaseFile));
|
||||
exports.EmscriptenFile = EmscriptenFile;
|
||||
/**
|
||||
* Mounts an Emscripten file system into the BrowserFS file system.
|
||||
*/
|
||||
var EmscriptenFileSystem = /** @class */ (function (_super) {
|
||||
__extends(EmscriptenFileSystem, _super);
|
||||
function EmscriptenFileSystem(_FS) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._FS = _FS;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Create an EmscriptenFileSystem instance with the given options.
|
||||
*/
|
||||
EmscriptenFileSystem.Create = function (opts, cb) {
|
||||
cb(null, new EmscriptenFileSystem(opts.FS));
|
||||
};
|
||||
EmscriptenFileSystem.isAvailable = function () { return true; };
|
||||
EmscriptenFileSystem.prototype.getName = function () { return this._FS.DB_NAME(); };
|
||||
EmscriptenFileSystem.prototype.isReadOnly = function () { return false; };
|
||||
EmscriptenFileSystem.prototype.supportsLinks = function () { return true; };
|
||||
EmscriptenFileSystem.prototype.supportsProps = function () { return true; };
|
||||
EmscriptenFileSystem.prototype.supportsSynch = function () { return true; };
|
||||
EmscriptenFileSystem.prototype.renameSync = function (oldPath, newPath) {
|
||||
try {
|
||||
this._FS.rename(oldPath, newPath);
|
||||
}
|
||||
catch (e) {
|
||||
if (e.errno === api_error_1.ErrorCode.ENOENT) {
|
||||
throw convertError(e, this.existsSync(oldPath) ? newPath : oldPath);
|
||||
}
|
||||
else {
|
||||
throw convertError(e);
|
||||
}
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.statSync = function (p, isLstat) {
|
||||
try {
|
||||
var stats = isLstat ? this._FS.lstat(p) : this._FS.stat(p);
|
||||
var itemType = this.modeToFileType(stats.mode);
|
||||
return new node_fs_stats_1.default(itemType, stats.size, stats.mode, stats.atime.getTime(), stats.mtime.getTime(), stats.ctime.getTime());
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.openSync = function (p, flag, mode) {
|
||||
try {
|
||||
var stream = this._FS.open(p, flag.getFlagString(), mode);
|
||||
if (this._FS.isDir(stream.node.mode)) {
|
||||
this._FS.close(stream);
|
||||
throw api_error_1.ApiError.EISDIR(p);
|
||||
}
|
||||
return new EmscriptenFile(this, this._FS, p, stream);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.unlinkSync = function (p) {
|
||||
try {
|
||||
this._FS.unlink(p);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.rmdirSync = function (p) {
|
||||
try {
|
||||
this._FS.rmdir(p);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.mkdirSync = function (p, mode) {
|
||||
try {
|
||||
this._FS.mkdir(p, mode);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.readdirSync = function (p) {
|
||||
try {
|
||||
// Emscripten returns items for '.' and '..'. Node does not.
|
||||
return this._FS.readdir(p).filter(function (p) { return p !== '.' && p !== '..'; });
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.truncateSync = function (p, len) {
|
||||
try {
|
||||
this._FS.truncate(p, len);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.readFileSync = function (p, encoding, flag) {
|
||||
try {
|
||||
var data = this._FS.readFile(p, { flags: flag.getFlagString() });
|
||||
var buff = (0, util_1.uint8Array2Buffer)(data);
|
||||
if (encoding) {
|
||||
return buff.toString(encoding);
|
||||
}
|
||||
else {
|
||||
return buff;
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.writeFileSync = function (p, data, encoding, flag, mode) {
|
||||
try {
|
||||
if (encoding) {
|
||||
data = Buffer.from(data, encoding);
|
||||
}
|
||||
var u8 = (0, util_1.buffer2Uint8array)(data);
|
||||
this._FS.writeFile(p, u8, { flags: flag.getFlagString(), encoding: 'binary' });
|
||||
this._FS.chmod(p, mode);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.chmodSync = function (p, isLchmod, mode) {
|
||||
try {
|
||||
isLchmod ? this._FS.lchmod(p, mode) : this._FS.chmod(p, mode);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.chownSync = function (p, isLchown, uid, gid) {
|
||||
try {
|
||||
isLchown ? this._FS.lchown(p, uid, gid) : this._FS.chown(p, uid, gid);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.symlinkSync = function (srcpath, dstpath, type) {
|
||||
try {
|
||||
this._FS.symlink(srcpath, dstpath);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.readlinkSync = function (p) {
|
||||
try {
|
||||
return this._FS.readlink(p);
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.utimesSync = function (p, atime, mtime) {
|
||||
try {
|
||||
this._FS.utime(p, atime.getTime(), mtime.getTime());
|
||||
}
|
||||
catch (e) {
|
||||
throw convertError(e, p);
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.prototype.modeToFileType = function (mode) {
|
||||
if (this._FS.isDir(mode)) {
|
||||
return node_fs_stats_1.FileType.DIRECTORY;
|
||||
}
|
||||
else if (this._FS.isFile(mode)) {
|
||||
return node_fs_stats_1.FileType.FILE;
|
||||
}
|
||||
else if (this._FS.isLink(mode)) {
|
||||
return node_fs_stats_1.FileType.SYMLINK;
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EPERM("Invalid mode: ".concat(mode));
|
||||
}
|
||||
};
|
||||
EmscriptenFileSystem.Name = "EmscriptenFileSystem";
|
||||
EmscriptenFileSystem.Options = {
|
||||
FS: {
|
||||
type: "object",
|
||||
description: "The Emscripten file system to use (the `FS` variable)"
|
||||
}
|
||||
};
|
||||
return EmscriptenFileSystem;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = EmscriptenFileSystem;
|
||||
//# sourceMappingURL=Emscripten.js.map
|
||||
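A usage sketch for the Emscripten adapter above. It assumes `EmscriptenFileSystem` is this module's default export and that `emModule.FS` is the runtime `FS` object of an already-initialized Emscripten module (how the `FS` object is exposed depends on your Emscripten build flags); the variable names are illustrative.

```javascript
// Usage sketch; `emModule` is an assumed, initialized Emscripten module exposing FS.
EmscriptenFileSystem.Create({ FS: emModule.FS }, function (e, emFS) {
  if (e) { throw e; }
  BrowserFS.initialize(emFS);
  var fs = BrowserFS.BFSRequire("fs");
  // Calls are forwarded synchronously to the Emscripten FS; Emscripten errors
  // are translated into BrowserFS ApiErrors by convertError above.
  fs.writeFileSync("/hello.txt", "hi");
  console.log(fs.readdirSync("/"));
});
```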
48
sandpack-generated/static/browserfs11/node/backend/FolderAdapter.d.ts
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
/**
|
||||
* Configuration options for a FolderAdapter file system.
|
||||
*/
|
||||
export interface FolderAdapterOptions {
|
||||
folder: string;
|
||||
wrapped: FileSystem;
|
||||
}
|
||||
/**
|
||||
* The FolderAdapter file system wraps a file system, and scopes all interactions to a subfolder of that file system.
|
||||
*
|
||||
* Example: Given a file system `foo` with folder `bar` and file `bar/baz`...
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.configure({
|
||||
* fs: "FolderAdapter",
|
||||
* options: {
|
||||
* folder: "bar",
|
||||
* wrapped: foo
|
||||
* }
|
||||
* }, function(e) {
|
||||
* var fs = BrowserFS.BFSRequire('fs');
|
||||
* fs.readdirSync('/'); // ['baz']
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
export default class FolderAdapter extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "FolderAdapter";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates a FolderAdapter instance with the given options.
|
||||
*/
|
||||
static Create(opts: FolderAdapterOptions, cb: BFSCallback<FolderAdapter>): void;
|
||||
static isAvailable(): boolean;
|
||||
_wrapped: FileSystem;
|
||||
_folder: string;
|
||||
private constructor();
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
/**
|
||||
* Initialize the file system. Ensures that the wrapped file system
|
||||
* has the given folder.
|
||||
*/
|
||||
private _initialize;
|
||||
}
|
||||
@@ -0,0 +1,182 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var path = require("path");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
/**
|
||||
* The FolderAdapter file system wraps a file system, and scopes all interactions to a subfolder of that file system.
|
||||
*
|
||||
* Example: Given a file system `foo` with folder `bar` and file `bar/baz`...
|
||||
*
|
||||
* ```javascript
|
||||
* BrowserFS.configure({
|
||||
* fs: "FolderAdapter",
|
||||
* options: {
|
||||
* folder: "bar",
|
||||
* wrapped: foo
|
||||
* }
|
||||
* }, function(e) {
|
||||
* var fs = BrowserFS.BFSRequire('fs');
|
||||
* fs.readdirSync('/'); // ['baz']
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
var FolderAdapter = /** @class */ (function (_super) {
|
||||
__extends(FolderAdapter, _super);
|
||||
function FolderAdapter(folder, wrapped) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._folder = folder;
|
||||
_this._wrapped = wrapped;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates a FolderAdapter instance with the given options.
|
||||
*/
|
||||
FolderAdapter.Create = function (opts, cb) {
|
||||
var fa = new FolderAdapter(opts.folder, opts.wrapped);
|
||||
fa._initialize(function (e) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
cb(null, fa);
|
||||
}
|
||||
});
|
||||
};
|
||||
FolderAdapter.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
FolderAdapter.prototype.getName = function () { return this._wrapped.getName(); };
|
||||
FolderAdapter.prototype.isReadOnly = function () { return this._wrapped.isReadOnly(); };
|
||||
FolderAdapter.prototype.supportsProps = function () { return this._wrapped.supportsProps(); };
|
||||
FolderAdapter.prototype.supportsSynch = function () { return this._wrapped.supportsSynch(); };
|
||||
FolderAdapter.prototype.supportsLinks = function () { return false; };
|
||||
/**
|
||||
* Initialize the file system. Ensures that the wrapped file system
|
||||
* has the given folder.
|
||||
*/
|
||||
FolderAdapter.prototype._initialize = function (cb) {
|
||||
var _this = this;
|
||||
this._wrapped.exists(this._folder, function (exists) {
|
||||
if (exists) {
|
||||
cb();
|
||||
}
|
||||
else if (_this._wrapped.isReadOnly()) {
|
||||
cb(api_error_1.ApiError.ENOENT(_this._folder));
|
||||
}
|
||||
else {
|
||||
_this._wrapped.mkdir(_this._folder, 0x1ff, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
FolderAdapter.Name = "FolderAdapter";
|
||||
FolderAdapter.Options = {
|
||||
folder: {
|
||||
type: "string",
|
||||
description: "The folder to use as the root directory"
|
||||
},
|
||||
wrapped: {
|
||||
type: "object",
|
||||
description: "The file system to wrap"
|
||||
}
|
||||
};
|
||||
return FolderAdapter;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = FolderAdapter;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function translateError(folder, e) {
|
||||
if (e !== null && typeof e === 'object') {
|
||||
var err = e;
|
||||
var p = err.path;
|
||||
if (p) {
|
||||
p = '/' + path.relative(folder, p);
|
||||
err.message = err.message.replace(err.path, p);
|
||||
err.path = p;
|
||||
}
|
||||
}
|
||||
return e;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function wrapCallback(folder, cb) {
|
||||
if (typeof cb === 'function') {
|
||||
return function (err) {
|
||||
if (arguments.length > 0) {
|
||||
arguments[0] = translateError(folder, err);
|
||||
}
|
||||
cb.apply(null, arguments);
|
||||
};
|
||||
}
|
||||
else {
|
||||
return cb;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function wrapFunction(name, wrapFirst, wrapSecond) {
|
||||
if (name.slice(name.length - 4) !== 'Sync') {
|
||||
// Async function. Translate error in callback.
|
||||
return function () {
|
||||
if (arguments.length > 0) {
|
||||
if (wrapFirst) {
|
||||
arguments[0] = path.join(this._folder, arguments[0]);
|
||||
}
|
||||
if (wrapSecond) {
|
||||
arguments[1] = path.join(this._folder, arguments[1]);
|
||||
}
|
||||
arguments[arguments.length - 1] = wrapCallback(this._folder, arguments[arguments.length - 1]);
|
||||
}
|
||||
return this._wrapped[name].apply(this._wrapped, arguments);
|
||||
};
|
||||
}
|
||||
else {
|
||||
// Sync function. Translate error in catch.
|
||||
return function () {
|
||||
try {
|
||||
if (wrapFirst) {
|
||||
arguments[0] = path.join(this._folder, arguments[0]);
|
||||
}
|
||||
if (wrapSecond) {
|
||||
arguments[1] = path.join(this._folder, arguments[1]);
|
||||
}
|
||||
return this._wrapped[name].apply(this._wrapped, arguments);
|
||||
}
|
||||
catch (e) {
|
||||
throw translateError(this._folder, e);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
// First argument is a path.
|
||||
['diskSpace', 'stat', 'statSync', 'open', 'openSync', 'unlink', 'unlinkSync',
|
||||
'rmdir', 'rmdirSync', 'mkdir', 'mkdirSync', 'readdir', 'readdirSync', 'exists',
|
||||
'existsSync', 'realpath', 'realpathSync', 'truncate', 'truncateSync', 'readFile',
|
||||
'readFileSync', 'writeFile', 'writeFileSync', 'appendFile', 'appendFileSync',
|
||||
'chmod', 'chmodSync', 'chown', 'chownSync', 'utimes', 'utimesSync', 'readlink',
|
||||
'readlinkSync'].forEach(function (name) {
|
||||
FolderAdapter.prototype[name] = wrapFunction(name, true, false);
|
||||
});
|
||||
// First and second arguments are paths.
|
||||
['rename', 'renameSync', 'link', 'linkSync', 'symlink', 'symlinkSync'].forEach(function (name) {
|
||||
FolderAdapter.prototype[name] = wrapFunction(name, true, true);
|
||||
});
|
||||
//# sourceMappingURL=FolderAdapter.js.map
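To make the path translation above concrete, here is a small usage sketch mirroring the `foo`/`bar` example from the doc comment: `wrapFunction` joins every incoming path onto the configured folder before delegating to the wrapped file system, and `translateError` maps error paths back into the adapter's namespace.

```javascript
// `foo` is assumed to be an already-created file system containing 'bar/baz'.
BrowserFS.configure({
  fs: "FolderAdapter",
  options: { folder: "bar", wrapped: foo }
}, function (e) {
  var fs = BrowserFS.BFSRequire('fs');
  // Forwarded as foo.readFile('bar/baz', ...); an error coming back from foo
  // would have its path rewritten to '/baz' by translateError.
  fs.readFile('/baz', function (err, data) { /* ... */ });
});
```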
|
||||
80
sandpack-generated/static/browserfs11/node/backend/HTML5FS.d.ts
vendored
Normal file
80
sandpack-generated/static/browserfs11/node/backend/HTML5FS.d.ts
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
/// <reference types="filesystem" />
|
||||
/// <reference types="node" />
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
import { BaseFileSystem, FileSystem as IFileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { File as IFile } from '../core/file';
|
||||
export declare class HTML5FSFile extends PreloadFile<HTML5FS> implements IFile {
|
||||
private _entry;
|
||||
constructor(fs: HTML5FS, entry: FileEntry, path: string, flag: FileFlag, stat: Stats, contents?: Buffer);
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
}
|
||||
export interface HTML5FSOptions {
|
||||
size?: number;
|
||||
type?: number;
|
||||
}
|
||||
/**
|
||||
* A read-write filesystem backed by the HTML5 FileSystem API.
|
||||
*
|
||||
* As the HTML5 FileSystem is only implemented in Blink, this interface is
|
||||
* only available in Chrome.
|
||||
*/
|
||||
export default class HTML5FS extends BaseFileSystem implements IFileSystem {
|
||||
static readonly Name = "HTML5FS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates an HTML5FS instance with the given options.
|
||||
*/
|
||||
static Create(opts: HTML5FSOptions, cb: BFSCallback<HTML5FS>): void;
|
||||
static isAvailable(): boolean;
|
||||
fs: FileSystem;
|
||||
private size;
|
||||
private type;
|
||||
/**
|
||||
* @param size storage quota to request, in megabytes. Allocated value may be less.
|
||||
* @param type window.PERSISTENT or window.TEMPORARY. Defaults to PERSISTENT.
|
||||
*/
|
||||
private constructor();
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsSymlinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Deletes everything in the FS. Used for testing.
|
||||
* Karma clears the storage after you quit it but not between runs of the test
|
||||
* suite, and the tests expect an empty FS every time.
|
||||
*/
|
||||
empty(mainCb: BFSOneArgCallback): void;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
open(p: string, flags: FileFlag, mode: number, cb: BFSCallback<IFile>): void;
|
||||
unlink(path: string, cb: BFSOneArgCallback): void;
|
||||
rmdir(path: string, cb: BFSOneArgCallback): void;
|
||||
mkdir(path: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Map _readdir's list of `FileEntry`s to their names and return that.
|
||||
*/
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
/**
|
||||
* Returns a BrowserFS object representing a File.
|
||||
*/
|
||||
private _makeFile;
|
||||
/**
|
||||
* Returns an array of `FileEntry`s. Used internally by empty and readdir.
|
||||
*/
|
||||
private _readdir;
|
||||
/**
|
||||
* Requests a storage quota from the browser to back this FS.
|
||||
*/
|
||||
private _allocate;
|
||||
/**
|
||||
* Delete a file or directory from the file system
|
||||
* isFile should reflect which call was made to remove it (`unlink` or
|
||||
* `rmdir`). If this doesn't match what's actually at `path`, an error will be
|
||||
* returned
|
||||
*/
|
||||
private _remove;
|
||||
}
|
||||
516
sandpack-generated/static/browserfs11/node/backend/HTML5FS.js
Normal file
516
sandpack-generated/static/browserfs11/node/backend/HTML5FS.js
Normal file
@@ -0,0 +1,516 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.HTML5FSFile = void 0;
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var path = require("path");
|
||||
var global_1 = require("../core/global");
|
||||
var async_1 = require("async");
|
||||
var util_1 = require("../core/util");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isDirectoryEntry(entry) {
|
||||
return entry.isDirectory;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var _getFS = global_1.default.webkitRequestFileSystem || global_1.default.requestFileSystem || null;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function _requestQuota(type, size, success, errorCallback) {
|
||||
// We cast navigator and window to '<any>' because everything here is
|
||||
// nonstandard functionality, despite the fact that Chrome has the only
|
||||
// implementation of the HTML5FS and is likely driving the standardization
|
||||
// process. Thus, these objects defined off of navigator and window are not
|
||||
// present in the DefinitelyTyped TypeScript typings for FileSystem.
|
||||
if (typeof navigator['webkitPersistentStorage'] !== 'undefined') {
|
||||
switch (type) {
|
||||
case global_1.default.PERSISTENT:
|
||||
navigator.webkitPersistentStorage.requestQuota(size, success, errorCallback);
|
||||
break;
|
||||
case global_1.default.TEMPORARY:
|
||||
navigator.webkitTemporaryStorage.requestQuota(size, success, errorCallback);
|
||||
break;
|
||||
default:
|
||||
errorCallback(new TypeError("Invalid storage type: ".concat(type)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
global_1.default.webkitStorageInfo.requestQuota(type, size, success, errorCallback);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function _toArray(list) {
|
||||
return Array.prototype.slice.call(list || [], 0);
|
||||
}
|
||||
/**
|
||||
* Converts the given DOMError into an appropriate ApiError.
|
||||
* @url https://developer.mozilla.org/en-US/docs/Web/API/DOMError
|
||||
* @hidden
|
||||
*/
|
||||
// @ts-ignore
|
||||
function convertError(err, p, expectedDir) {
|
||||
switch (err.name) {
|
||||
/* The user agent failed to create a file or directory due to the existence of a file or
|
||||
directory with the same path. */
|
||||
case "PathExistsError":
|
||||
return api_error_1.ApiError.EEXIST(p);
|
||||
/* The operation failed because it would cause the application to exceed its storage quota. */
|
||||
case 'QuotaExceededError':
|
||||
return api_error_1.ApiError.FileError(api_error_1.ErrorCode.ENOSPC, p);
|
||||
/* A required file or directory could not be found at the time an operation was processed. */
|
||||
case 'NotFoundError':
|
||||
return api_error_1.ApiError.ENOENT(p);
|
||||
/* This is a security error code to be used in situations not covered by any other error codes.
|
||||
- A required file was unsafe for access within a Web application
|
||||
- Too many calls are being made on filesystem resources */
|
||||
case 'SecurityError':
|
||||
return api_error_1.ApiError.FileError(api_error_1.ErrorCode.EACCES, p);
|
||||
/* The modification requested was illegal. Examples of invalid modifications include moving a
|
||||
directory into its own child, moving a file into its parent directory without changing its name,
|
||||
or copying a directory to a path occupied by a file. */
|
||||
case 'InvalidModificationError':
|
||||
return api_error_1.ApiError.FileError(api_error_1.ErrorCode.EPERM, p);
|
||||
/* The user has attempted to look up a file or directory, but the Entry found is of the wrong type
|
||||
[e.g. is a DirectoryEntry when the user requested a FileEntry]. */
|
||||
case 'TypeMismatchError':
|
||||
return api_error_1.ApiError.FileError(expectedDir ? api_error_1.ErrorCode.ENOTDIR : api_error_1.ErrorCode.EISDIR, p);
|
||||
/* A path or URL supplied to the API was malformed. */
|
||||
case "EncodingError":
|
||||
/* An operation depended on state cached in an interface object, but that state has changed
|
||||
since it was read from disk. */
|
||||
case "InvalidStateError":
|
||||
/* The user attempted to write to a file or directory which could not be modified due to the state
|
||||
of the underlying filesystem. */
|
||||
case "NoModificationAllowedError":
|
||||
default:
|
||||
return api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, p);
|
||||
}
|
||||
}
|
||||
// A note about getFile and getDirectory options:
|
||||
// These methods are called at numerous places in this file, and are passed
|
||||
// some combination of these two options:
|
||||
// - create: If true, the entry will be created if it doesn't exist.
|
||||
// If false, an error will be thrown if it doesn't exist.
|
||||
// - exclusive: If true, only create the entry if it doesn't already exist,
|
||||
// and throw an error if it does.
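// Summary of the combinations used in this file:
//   { create: false }                 - stat, _readdir, _remove: the entry must already exist.
//   { create: true, exclusive: true } - mkdir: fail if the directory already exists.
//   open() derives `create` from the flag's pathNotExistsAction() and
//   `exclusive` from flag.isExclusive().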
|
||||
var HTML5FSFile = /** @class */ (function (_super) {
|
||||
__extends(HTML5FSFile, _super);
|
||||
function HTML5FSFile(fs, entry, path, flag, stat, contents) {
|
||||
var _this = _super.call(this, fs, path, flag, stat, contents) || this;
|
||||
_this._entry = entry;
|
||||
return _this;
|
||||
}
|
||||
HTML5FSFile.prototype.sync = function (cb) {
|
||||
var _this = this;
|
||||
if (!this.isDirty()) {
|
||||
return cb();
|
||||
}
|
||||
this._entry.createWriter(function (writer) {
|
||||
var buffer = _this.getBuffer();
|
||||
var blob = new Blob([(0, util_1.buffer2ArrayBuffer)(buffer)]);
|
||||
var length = blob.size;
|
||||
writer.onwriteend = function (err) {
|
||||
writer.onwriteend = null;
|
||||
writer.onerror = null;
|
||||
writer.truncate(length);
|
||||
_this.resetDirty();
|
||||
cb();
|
||||
};
|
||||
writer.onerror = function (err) {
|
||||
cb(convertError(err, _this.getPath(), false));
|
||||
};
|
||||
writer.write(blob);
|
||||
});
|
||||
};
|
||||
HTML5FSFile.prototype.close = function (cb) {
|
||||
this.sync(cb);
|
||||
};
|
||||
return HTML5FSFile;
|
||||
}(preload_file_1.default));
|
||||
exports.HTML5FSFile = HTML5FSFile;
|
||||
/**
|
||||
* A read-write filesystem backed by the HTML5 FileSystem API.
|
||||
*
|
||||
* As the HTML5 FileSystem is only implemented in Blink, this interface is
|
||||
* only available in Chrome.
|
||||
*/
|
||||
var HTML5FS = /** @class */ (function (_super) {
|
||||
__extends(HTML5FS, _super);
|
||||
/**
|
||||
* @param size storage quota to request, in megabytes. Allocated value may be less.
|
||||
* @param type window.PERSISTENT or window.TEMPORARY. Defaults to PERSISTENT.
|
||||
*/
|
||||
function HTML5FS(size, type) {
|
||||
if (size === void 0) { size = 5; }
|
||||
if (type === void 0) { type = global_1.default.PERSISTENT; }
|
||||
var _this = _super.call(this) || this;
|
||||
// Convert MB to bytes.
|
||||
_this.size = 1024 * 1024 * size;
|
||||
_this.type = type;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates an HTML5FS instance with the given options.
|
||||
*/
|
||||
HTML5FS.Create = function (opts, cb) {
|
||||
var fs = new HTML5FS(opts.size, opts.type);
|
||||
fs._allocate(function (e) { return e ? cb(e) : cb(null, fs); });
|
||||
};
|
||||
HTML5FS.isAvailable = function () {
|
||||
return Boolean(_getFS);
|
||||
};
|
||||
HTML5FS.prototype.getName = function () {
|
||||
return HTML5FS.Name;
|
||||
};
|
||||
HTML5FS.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
HTML5FS.prototype.supportsSymlinks = function () {
|
||||
return false;
|
||||
};
|
||||
HTML5FS.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
HTML5FS.prototype.supportsSynch = function () {
|
||||
return false;
|
||||
};
|
||||
/**
|
||||
* Deletes everything in the FS. Used for testing.
|
||||
* Karma clears the storage after you quit it but not between runs of the test
|
||||
* suite, and the tests expect an empty FS every time.
|
||||
*/
|
||||
HTML5FS.prototype.empty = function (mainCb) {
|
||||
// Get a list of all entries in the root directory to delete them
|
||||
this._readdir('/', function (err, entries) {
|
||||
if (err) {
|
||||
mainCb(err);
|
||||
}
|
||||
else {
|
||||
// Called when every entry has been operated on
|
||||
var finished = function (er) {
|
||||
if (err) {
|
||||
mainCb(err);
|
||||
}
|
||||
else {
|
||||
mainCb();
|
||||
}
|
||||
};
|
||||
// Removes files and recursively removes directories
|
||||
var deleteEntry = function (entry, cb) {
|
||||
var succ = function () {
|
||||
cb();
|
||||
};
|
||||
var error = function (err) {
|
||||
cb(convertError(err, entry.fullPath, !entry.isDirectory));
|
||||
};
|
||||
if (isDirectoryEntry(entry)) {
|
||||
entry.removeRecursively(succ, error);
|
||||
}
|
||||
else {
|
||||
entry.remove(succ, error);
|
||||
}
|
||||
};
|
||||
// Loop through the entries and remove them, then call the callback
|
||||
// when they're all finished.
|
||||
// @ts-ignore
|
||||
(0, async_1.each)(entries, deleteEntry, finished);
|
||||
}
|
||||
});
|
||||
};
|
||||
HTML5FS.prototype.rename = function (oldPath, newPath, cb) {
|
||||
var _this = this;
|
||||
var semaphore = 2;
|
||||
var successCount = 0;
|
||||
var root = this.fs.root;
|
||||
var currentPath = oldPath;
|
||||
var error = function (err) {
|
||||
if (--semaphore <= 0) {
|
||||
cb(convertError(err, currentPath, false));
|
||||
}
|
||||
};
|
||||
var success = function (file) {
|
||||
if (++successCount === 2) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Something was identified as both a file and a directory. This should never happen."));
|
||||
}
|
||||
// SPECIAL CASE: If newPath === oldPath, and the path exists, then
|
||||
// this operation trivially succeeds.
|
||||
if (oldPath === newPath) {
|
||||
return cb();
|
||||
}
|
||||
// Get the new parent directory.
|
||||
currentPath = path.dirname(newPath);
|
||||
root.getDirectory(currentPath, {}, function (parentDir) {
|
||||
currentPath = path.basename(newPath);
|
||||
file.moveTo(parentDir, currentPath, function (entry) { cb(); }, function (err) {
|
||||
// SPECIAL CASE: If oldPath is a directory, and newPath is a
|
||||
// file, rename should delete the file and perform the move.
|
||||
if (file.isDirectory) {
|
||||
currentPath = newPath;
|
||||
// Unlink only works on files. Try to delete newPath.
|
||||
_this.unlink(newPath, function (e) {
|
||||
if (e) {
|
||||
// newPath is probably a directory.
|
||||
error(err);
|
||||
}
|
||||
else {
|
||||
// Recur, now that newPath doesn't exist.
|
||||
_this.rename(oldPath, newPath, cb);
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
error(err);
|
||||
}
|
||||
});
|
||||
}, error);
|
||||
};
|
||||
// We don't know if oldPath is a *file* or a *directory*, and there's no
|
||||
// way to stat items. So launch both requests, see which one succeeds.
|
||||
root.getFile(oldPath, {}, success, error);
|
||||
root.getDirectory(oldPath, {}, success, error);
|
||||
};
|
||||
HTML5FS.prototype.stat = function (path, isLstat, cb) {
|
||||
var _this = this;
|
||||
// Throw an error if the entry doesn't exist, because then there's nothing
|
||||
// to stat.
|
||||
var opts = {
|
||||
create: false
|
||||
};
|
||||
// Called when the path has been successfully loaded as a file.
|
||||
var loadAsFile = function (entry) {
|
||||
var fileFromEntry = function (file) {
|
||||
var stat = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, file.size);
|
||||
cb(null, stat);
|
||||
};
|
||||
entry.file(fileFromEntry, failedToLoad);
|
||||
};
|
||||
// Called when the path has been successfully loaded as a directory.
|
||||
var loadAsDir = function (dir) {
|
||||
// Directory entry size can't be determined from the HTML5 FS API, and is
|
||||
// implementation-dependent anyway, so a dummy value is used.
|
||||
var size = 4096;
|
||||
var stat = new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, size);
|
||||
cb(null, stat);
|
||||
};
|
||||
// Called when the path couldn't be opened as a directory or a file.
|
||||
var failedToLoad = function (err) {
|
||||
cb(convertError(err, path, false /* Unknown / irrelevant */));
|
||||
};
|
||||
// Called when the path couldn't be opened as a file, but might still be a
|
||||
// directory.
|
||||
var failedToLoadAsFile = function () {
|
||||
_this.fs.root.getDirectory(path, opts, loadAsDir, failedToLoad);
|
||||
};
|
||||
// No method currently exists to determine whether a path refers to a
|
||||
// directory or a file, so this implementation tries both and uses the first
|
||||
// one that succeeds.
|
||||
this.fs.root.getFile(path, opts, loadAsFile, failedToLoadAsFile);
|
||||
};
|
||||
HTML5FS.prototype.open = function (p, flags, mode, cb) {
|
||||
var _this = this;
|
||||
// XXX: err is a DOMError
|
||||
var error = function (err) {
|
||||
if (err.name === 'InvalidModificationError' && flags.isExclusive()) {
|
||||
cb(api_error_1.ApiError.EEXIST(p));
|
||||
}
|
||||
else {
|
||||
cb(convertError(err, p, false));
|
||||
}
|
||||
};
|
||||
this.fs.root.getFile(p, {
|
||||
create: flags.pathNotExistsAction() === file_flag_1.ActionType.CREATE_FILE,
|
||||
exclusive: flags.isExclusive()
|
||||
}, function (entry) {
|
||||
// Try to fetch corresponding file.
|
||||
entry.file(function (file) {
|
||||
var reader = new FileReader();
|
||||
reader.onloadend = function (event) {
|
||||
var bfsFile = _this._makeFile(p, entry, flags, file, reader.result);
|
||||
cb(null, bfsFile);
|
||||
};
|
||||
reader.onerror = function (ev) {
|
||||
error(reader.error);
|
||||
};
|
||||
reader.readAsArrayBuffer(file);
|
||||
}, error);
|
||||
}, error);
|
||||
};
|
||||
HTML5FS.prototype.unlink = function (path, cb) {
|
||||
this._remove(path, cb, true);
|
||||
};
|
||||
HTML5FS.prototype.rmdir = function (path, cb) {
|
||||
var _this = this;
|
||||
// Check if directory is non-empty, first.
|
||||
this.readdir(path, function (e, files) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else if (files.length > 0) {
|
||||
cb(api_error_1.ApiError.ENOTEMPTY(path));
|
||||
}
|
||||
else {
|
||||
_this._remove(path, cb, false);
|
||||
}
|
||||
});
|
||||
};
|
||||
HTML5FS.prototype.mkdir = function (path, mode, cb) {
|
||||
// Create the directory, but throw an error if it already exists, as per
|
||||
// mkdir(1)
|
||||
var opts = {
|
||||
create: true,
|
||||
exclusive: true
|
||||
};
|
||||
var success = function (dir) {
|
||||
cb();
|
||||
};
|
||||
var error = function (err) {
|
||||
cb(convertError(err, path, true));
|
||||
};
|
||||
this.fs.root.getDirectory(path, opts, success, error);
|
||||
};
|
||||
/**
|
||||
* Map _readdir's list of `FileEntry`s to their names and return that.
|
||||
*/
|
||||
HTML5FS.prototype.readdir = function (path, cb) {
|
||||
this._readdir(path, function (e, entries) {
|
||||
if (entries) {
|
||||
var rv = [];
|
||||
for (var _i = 0, entries_1 = entries; _i < entries_1.length; _i++) {
|
||||
var entry = entries_1[_i];
|
||||
rv.push(entry.name);
|
||||
}
|
||||
cb(null, rv);
|
||||
}
|
||||
else {
|
||||
return cb(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Returns a BrowserFS object representing a File.
|
||||
*/
|
||||
HTML5FS.prototype._makeFile = function (path, entry, flag, stat, data) {
|
||||
if (data === void 0) { data = new ArrayBuffer(0); }
|
||||
var stats = new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, stat.size);
|
||||
var buffer = (0, util_1.arrayBuffer2Buffer)(data);
|
||||
return new HTML5FSFile(this, entry, path, flag, stats, buffer);
|
||||
};
|
||||
/**
|
||||
* Returns an array of `FileEntry`s. Used internally by empty and readdir.
|
||||
*/
|
||||
HTML5FS.prototype._readdir = function (path, cb) {
|
||||
var error = function (err) {
|
||||
cb(convertError(err, path, true));
|
||||
};
|
||||
// Grab the requested directory.
|
||||
this.fs.root.getDirectory(path, { create: false }, function (dirEntry) {
|
||||
var reader = dirEntry.createReader();
|
||||
var entries = [];
|
||||
// Call the reader.readEntries() until no more results are returned.
|
||||
var readEntries = function () {
|
||||
reader.readEntries((function (results) {
|
||||
if (results.length) {
|
||||
entries = entries.concat(_toArray(results));
|
||||
readEntries();
|
||||
}
|
||||
else {
|
||||
cb(null, entries);
|
||||
}
|
||||
}), error);
|
||||
};
|
||||
readEntries();
|
||||
}, error);
|
||||
};
|
||||
/**
|
||||
* Requests a storage quota from the browser to back this FS.
|
||||
*/
|
||||
HTML5FS.prototype._allocate = function (cb) {
|
||||
var _this = this;
|
||||
var success = function (fs) {
|
||||
_this.fs = fs;
|
||||
cb();
|
||||
};
|
||||
var error = function (err) {
|
||||
cb(convertError(err, "/", true));
|
||||
};
|
||||
if (this.type === global_1.default.PERSISTENT) {
|
||||
_requestQuota(this.type, this.size, function (granted) {
|
||||
_getFS(_this.type, granted, success, error);
|
||||
}, error);
|
||||
}
|
||||
else {
|
||||
_getFS(this.type, this.size, success, error);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Delete a file or directory from the file system
|
||||
* isFile should reflect which call was made to remove it (`unlink` or
|
||||
* `rmdir`). If this doesn't match what's actually at `path`, an error will be
|
||||
* returned
|
||||
*/
|
||||
HTML5FS.prototype._remove = function (path, cb, isFile) {
|
||||
var success = function (entry) {
|
||||
var succ = function () {
|
||||
cb();
|
||||
};
|
||||
var err = function (err) {
|
||||
cb(convertError(err, path, !isFile));
|
||||
};
|
||||
entry.remove(succ, err);
|
||||
};
|
||||
var error = function (err) {
|
||||
cb(convertError(err, path, !isFile));
|
||||
};
|
||||
// Deleting the entry, so don't create it
|
||||
var opts = {
|
||||
create: false
|
||||
};
|
||||
if (isFile) {
|
||||
this.fs.root.getFile(path, opts, success, error);
|
||||
}
|
||||
else {
|
||||
this.fs.root.getDirectory(path, opts, success, error);
|
||||
}
|
||||
};
|
||||
HTML5FS.Name = "HTML5FS";
|
||||
HTML5FS.Options = {
|
||||
size: {
|
||||
type: "number",
|
||||
optional: true,
|
||||
description: "Storage quota to request, in megabytes. Allocated value may be less. Defaults to 5."
|
||||
},
|
||||
type: {
|
||||
type: "number",
|
||||
optional: true,
|
||||
description: "window.PERSISTENT or window.TEMPORARY. Defaults to PERSISTENT."
|
||||
}
|
||||
};
|
||||
return HTML5FS;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = HTML5FS;
|
||||
//# sourceMappingURL=HTML5FS.js.map
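A configuration sketch for the options above; `size` and `type` come straight from `HTML5FS.Options`, while using `"HTML5FS"` as the `fs` string assumes the backend is registered under its `Name`.

```javascript
// Sketch: request a 10 MB persistent quota (Blink/Chrome only; less may be granted).
BrowserFS.configure({
  fs: "HTML5FS",
  options: { size: 10, type: window.PERSISTENT }
}, function (e) {
  if (e) {
    // Quota was denied or the HTML5 FileSystem API is unavailable.
    console.error(e);
  }
});
```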
|
||||
98
sandpack-generated/static/browserfs11/node/backend/HTTPRequest.d.ts
vendored
Normal file
98
sandpack-generated/static/browserfs11/node/backend/HTTPRequest.d.ts
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
/**
|
||||
* Configuration options for a HTTPRequest file system.
|
||||
*/
|
||||
export interface HTTPRequestOptions {
|
||||
index?: string | object;
|
||||
baseUrl?: string;
|
||||
preferXHR?: boolean;
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
export default class HTTPRequest extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "HTTPRequest";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
static Create(opts: HTTPRequestOptions, cb: BFSCallback<HTTPRequest>): void;
|
||||
static isAvailable(): boolean;
|
||||
readonly prefixUrl: string;
|
||||
private _index;
|
||||
private _requestFileAsyncInternal;
|
||||
private _requestFileSizeAsyncInternal;
|
||||
private _requestFileSyncInternal;
|
||||
private _requestFileSizeSyncInternal;
|
||||
private constructor();
|
||||
empty(): void;
|
||||
getName(): string;
|
||||
diskSpace(path: string, cb: (total: number, free: number) => void): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
preloadFile(path: string, buffer: Buffer): void;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(path: string, isLstat: boolean): Stats;
|
||||
open(path: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(path: string, flags: FileFlag, mode: number): File;
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
private _getHTTPPath;
|
||||
/**
|
||||
* Asynchronously download the given file.
|
||||
*/
|
||||
private _requestFileAsync;
|
||||
/**
|
||||
* Synchronously download the given file.
|
||||
*/
|
||||
private _requestFileSync;
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
private _requestFileSizeAsync;
|
||||
private _requestFileSizeSync;
|
||||
}
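`preloadFile` seeds the in-memory index with contents obtained by other means; the path must already appear as a file in the listing, or the call throws. A hedged sketch, where `httpfs` is an already-created HTTPRequest instance and the `Buffer` constructor is assumed to be reachable via `BrowserFS.BFSRequire('buffer')`:

```javascript
var Buffer = BrowserFS.BFSRequire('buffer').Buffer; // assumed accessor
// '/home/jvilk/someFile.txt' matches the listing example above.
httpfs.preloadFile('/home/jvilk/someFile.txt', Buffer.from('cached contents'));
```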
|
||||
@@ -0,0 +1,410 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var util_1 = require("../core/util");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var xhr_1 = require("../generic/xhr");
|
||||
var fetch_1 = require("../generic/fetch");
|
||||
var file_index_1 = require("../generic/file_index");
|
||||
/**
|
||||
* Try to convert the given buffer into a string, and pass it to the callback.
|
||||
* Optimization that moves the needed try/catch into a helper function, as
|
||||
* this is an uncommon case.
|
||||
* @hidden
|
||||
*/
|
||||
function tryToString(buff, encoding, cb) {
|
||||
try {
|
||||
cb(null, buff.toString(encoding));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
}
|
||||
function syncNotAvailableError() {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP, "Synchronous HTTP download methods are not available in this environment.");
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
var HTTPRequest = /** @class */ (function (_super) {
|
||||
__extends(HTTPRequest, _super);
|
||||
function HTTPRequest(index, prefixUrl, preferXHR) {
|
||||
if (prefixUrl === void 0) { prefixUrl = ''; }
|
||||
if (preferXHR === void 0) { preferXHR = false; }
|
||||
var _this = _super.call(this) || this;
|
||||
// prefix_url must end in a directory separator.
|
||||
if (prefixUrl.length > 0 && prefixUrl.charAt(prefixUrl.length - 1) !== '/') {
|
||||
prefixUrl = prefixUrl + '/';
|
||||
}
|
||||
_this.prefixUrl = prefixUrl;
|
||||
_this._index = file_index_1.FileIndex.fromListing(index);
|
||||
if (fetch_1.fetchIsAvailable && (!preferXHR || !xhr_1.xhrIsAvailable)) {
|
||||
_this._requestFileAsyncInternal = fetch_1.fetchFileAsync;
|
||||
_this._requestFileSizeAsyncInternal = fetch_1.fetchFileSizeAsync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileAsyncInternal = xhr_1.asyncDownloadFile;
|
||||
_this._requestFileSizeAsyncInternal = xhr_1.getFileSizeAsync;
|
||||
}
|
||||
if (xhr_1.xhrIsAvailable) {
|
||||
_this._requestFileSyncInternal = xhr_1.syncDownloadFile;
|
||||
_this._requestFileSizeSyncInternal = xhr_1.getFileSizeSync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileSyncInternal = syncNotAvailableError;
|
||||
_this._requestFileSizeSyncInternal = syncNotAvailableError;
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
HTTPRequest.Create = function (opts, cb) {
|
||||
if (opts.index === undefined) {
|
||||
opts.index = "index.json";
|
||||
}
|
||||
if (typeof (opts.index) === "string") {
|
||||
(0, xhr_1.asyncDownloadFile)(opts.index, "json", function (e, data) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
cb(null, new HTTPRequest(data, opts.baseUrl));
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, new HTTPRequest(opts.index, opts.baseUrl));
|
||||
}
|
||||
};
|
||||
HTTPRequest.isAvailable = function () {
|
||||
return xhr_1.xhrIsAvailable || fetch_1.fetchIsAvailable;
|
||||
};
|
||||
HTTPRequest.prototype.empty = function () {
|
||||
this._index.fileIterator(function (file) {
|
||||
file.fileData = null;
|
||||
});
|
||||
};
|
||||
HTTPRequest.prototype.getName = function () {
|
||||
return HTTPRequest.Name;
|
||||
};
|
||||
HTTPRequest.prototype.diskSpace = function (path, cb) {
|
||||
// Read-only file system. We could calculate the total space, but that's not
|
||||
// important right now.
|
||||
cb(0, 0);
|
||||
};
|
||||
HTTPRequest.prototype.isReadOnly = function () {
|
||||
return true;
|
||||
};
|
||||
HTTPRequest.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
HTTPRequest.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
HTTPRequest.prototype.supportsSynch = function () {
|
||||
// Synchronous operations are only available via the XHR interface for now.
|
||||
return xhr_1.xhrIsAvailable;
|
||||
};
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
HTTPRequest.prototype.preloadFile = function (path, buffer) {
|
||||
var inode = this._index.getInode(path);
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats = inode.getData();
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype.stat = function (path, isLstat, cb) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
this._requestFileSizeAsync(path, function (e, size) {
|
||||
if (e) {
|
||||
return cb(e);
|
||||
}
|
||||
stats.size = size;
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
cb(null, stats);
|
||||
}
|
||||
else {
|
||||
cb(api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path));
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype.statSync = function (path, isLstat) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
stats.size = this._requestFileSizeSync(path);
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path);
|
||||
}
|
||||
return stats;
|
||||
};
|
||||
HTTPRequest.prototype.open = function (path, flags, mode, cb) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path));
|
||||
}
|
||||
var self = this;
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats_1 = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
return cb(api_error_1.ApiError.EEXIST(path));
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats_1.fileData) {
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), stats_1.fileData));
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
this._requestFileAsync(path, 'buffer', function (err, buffer) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
// we don't initially have file sizes
|
||||
stats_1.size = buffer.length;
|
||||
stats_1.fileData = buffer;
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), buffer));
|
||||
});
|
||||
break;
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.'));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(api_error_1.ApiError.EISDIR(path));
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype.openSync = function (path, flags, mode) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
|
||||
}
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
throw api_error_1.ApiError.EEXIST(path);
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats.fileData) {
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), stats.fileData);
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
var buffer = this._requestFileSync(path, 'buffer');
|
||||
// we don't initially have file sizes
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), buffer);
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.');
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype.readdir = function (path, cb) {
|
||||
try {
|
||||
cb(null, this.readdirSync(path));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype.readdirSync = function (path) {
|
||||
// Check if it exists.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
return inode.getListing();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.ENOTDIR(path);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
HTTPRequest.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err, arg) {
|
||||
fd.close(function (err2) {
|
||||
if (!err) {
|
||||
err = err2;
|
||||
}
|
||||
return oldCb(err, arg);
|
||||
});
|
||||
};
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
cb(err, (0, util_1.copyingSlice)(fdBuff));
|
||||
}
|
||||
else {
|
||||
tryToString(fdBuff, encoding, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
HTTPRequest.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
return (0, util_1.copyingSlice)(fdBuff);
|
||||
}
|
||||
return fdBuff.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
HTTPRequest.prototype._getHTTPPath = function (filePath) {
|
||||
if (filePath.charAt(0) === '/') {
|
||||
filePath = filePath.slice(1);
|
||||
}
|
||||
return this.prefixUrl + filePath;
|
||||
};
|
||||
HTTPRequest.prototype._requestFileAsync = function (p, type, cb) {
|
||||
this._requestFileAsyncInternal(this._getHTTPPath(p), type, cb);
|
||||
};
|
||||
HTTPRequest.prototype._requestFileSync = function (p, type) {
|
||||
return this._requestFileSyncInternal(this._getHTTPPath(p), type);
|
||||
};
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
HTTPRequest.prototype._requestFileSizeAsync = function (path, cb) {
|
||||
this._requestFileSizeAsyncInternal(this._getHTTPPath(path), cb);
|
||||
};
|
||||
HTTPRequest.prototype._requestFileSizeSync = function (path) {
|
||||
return this._requestFileSizeSyncInternal(this._getHTTPPath(path));
|
||||
};
|
||||
HTTPRequest.Name = "HTTPRequest";
|
||||
HTTPRequest.Options = {
|
||||
index: {
|
||||
type: ["string", "object"],
|
||||
optional: true,
|
||||
description: "URL to a file index as a JSON file or the file index object itself, generated with the make_http_index script. Defaults to `index.json`."
|
||||
},
|
||||
baseUrl: {
|
||||
type: "string",
|
||||
optional: true,
|
||||
description: "Used as the URL prefix for fetched files. Default: Fetch files relative to the index."
|
||||
},
|
||||
preferXHR: {
|
||||
type: "boolean",
|
||||
optional: true,
|
||||
description: "Whether to prefer XmlHttpRequest or fetch for async operations if both are available. Default: false"
|
||||
}
|
||||
};
|
||||
return HTTPRequest;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = HTTPRequest;
|
||||
//# sourceMappingURL=HTTPRequest.js.map
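A configuration sketch tying the options above together, using an index generated with `make_http_index` as described in the doc comment; the URLs are placeholders.

```javascript
BrowserFS.configure({
  fs: "HTTPRequest",
  options: {
    index: "assets/index.json", // placeholder URL to the generated listing
    baseUrl: "assets/"          // file contents are fetched relative to this prefix
  }
}, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire('fs');
  // Read-only: writes fail with EPERM; file bodies are downloaded on first open.
  fs.readFile('/home/jvilk/someFile.txt', 'utf8', function (err, text) { /* ... */ });
});
```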
|
||||
28
sandpack-generated/static/browserfs11/node/backend/InMemory.d.ts
vendored
Normal file
28
sandpack-generated/static/browserfs11/node/backend/InMemory.d.ts
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
/// <reference types="node" />
|
||||
import { BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { SyncKeyValueStore, SimpleSyncStore, SyncKeyValueRWTransaction, SyncKeyValueFileSystem } from '../generic/key_value_filesystem';
|
||||
/**
|
||||
* A simple in-memory key-value store backed by a JavaScript object.
|
||||
*/
|
||||
export declare class InMemoryStore implements SyncKeyValueStore, SimpleSyncStore {
|
||||
private store;
|
||||
name(): string;
|
||||
clear(): void;
|
||||
beginTransaction(type: string): SyncKeyValueRWTransaction;
|
||||
get(key: string): Buffer;
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
del(key: string): void;
|
||||
}
|
||||
/**
|
||||
* A simple in-memory file system backed by an InMemoryStore.
|
||||
* Files are not persisted across page loads.
|
||||
*/
|
||||
export default class InMemoryFileSystem extends SyncKeyValueFileSystem {
|
||||
static readonly Name = "InMemory";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
static Create(options: any, cb: BFSCallback<InMemoryFileSystem>): void;
|
||||
private constructor();
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.InMemoryStore = void 0;
|
||||
var key_value_filesystem_1 = require("../generic/key_value_filesystem");
|
||||
/**
|
||||
* A simple in-memory key-value store backed by a JavaScript object.
|
||||
*/
|
||||
var InMemoryStore = /** @class */ (function () {
|
||||
function InMemoryStore() {
|
||||
this.store = {};
|
||||
}
|
||||
InMemoryStore.prototype.name = function () { return InMemoryFileSystem.Name; };
|
||||
InMemoryStore.prototype.clear = function () { this.store = {}; };
|
||||
InMemoryStore.prototype.beginTransaction = function (type) {
|
||||
return new key_value_filesystem_1.SimpleSyncRWTransaction(this);
|
||||
};
|
||||
InMemoryStore.prototype.get = function (key) {
|
||||
return this.store[key];
|
||||
};
|
||||
InMemoryStore.prototype.put = function (key, data, overwrite) {
|
||||
if (!overwrite && this.store.hasOwnProperty(key)) {
|
||||
return false;
|
||||
}
|
||||
this.store[key] = data;
|
||||
return true;
|
||||
};
|
||||
InMemoryStore.prototype.del = function (key) {
|
||||
delete this.store[key];
|
||||
};
|
||||
return InMemoryStore;
|
||||
}());
|
||||
exports.InMemoryStore = InMemoryStore;
|
||||
/**
|
||||
* A simple in-memory file system backed by an InMemoryStore.
|
||||
* Files are not persisted across page loads.
|
||||
*/
|
||||
var InMemoryFileSystem = /** @class */ (function (_super) {
|
||||
__extends(InMemoryFileSystem, _super);
|
||||
function InMemoryFileSystem() {
|
||||
return _super.call(this, { store: new InMemoryStore() }) || this;
|
||||
}
|
||||
/**
|
||||
* Creates an InMemoryFileSystem instance.
|
||||
*/
|
||||
InMemoryFileSystem.Create = function (options, cb) {
|
||||
cb(null, new InMemoryFileSystem());
|
||||
};
|
||||
InMemoryFileSystem.Name = "InMemory";
|
||||
InMemoryFileSystem.Options = {};
|
||||
return InMemoryFileSystem;
|
||||
}(key_value_filesystem_1.SyncKeyValueFileSystem));
|
||||
exports.default = InMemoryFileSystem;
|
||||
//# sourceMappingURL=InMemory.js.map
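The in-memory backend takes no options, so configuration is trivial; a minimal sketch:

```javascript
BrowserFS.configure({ fs: "InMemory", options: {} }, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire('fs');
  fs.writeFileSync('/scratch.txt', 'hello');
  console.log(fs.readFileSync('/scratch.txt', 'utf8')); // "hello"
  // Nothing here survives a page reload.
});
```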
|
||||
52
sandpack-generated/static/browserfs11/node/backend/IndexedDB.d.ts
vendored
Normal file
52
sandpack-generated/static/browserfs11/node/backend/IndexedDB.d.ts
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
/// <reference types="node" />
|
||||
import { BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { AsyncKeyValueROTransaction, AsyncKeyValueRWTransaction, AsyncKeyValueStore, AsyncKeyValueFileSystem } from '../generic/key_value_filesystem';
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare class IndexedDBROTransaction implements AsyncKeyValueROTransaction {
|
||||
tx: IDBTransaction;
|
||||
store: IDBObjectStore;
|
||||
constructor(tx: IDBTransaction, store: IDBObjectStore);
|
||||
get(key: string, cb: BFSCallback<Buffer>): void;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare class IndexedDBRWTransaction extends IndexedDBROTransaction implements AsyncKeyValueRWTransaction, AsyncKeyValueROTransaction {
|
||||
constructor(tx: IDBTransaction, store: IDBObjectStore);
|
||||
put(key: string, data: Buffer, overwrite: boolean, cb: BFSCallback<boolean>): void;
|
||||
del(key: string, cb: BFSOneArgCallback): void;
|
||||
commit(cb: BFSOneArgCallback): void;
|
||||
abort(cb: BFSOneArgCallback): void;
|
||||
}
|
||||
export declare class IndexedDBStore implements AsyncKeyValueStore {
|
||||
private db;
|
||||
private storeName;
|
||||
static Create(storeName: string, cb: BFSCallback<IndexedDBStore>): void;
|
||||
constructor(db: IDBDatabase, storeName: string);
|
||||
name(): string;
|
||||
clear(cb: BFSOneArgCallback): void;
|
||||
beginTransaction(type: 'readonly'): AsyncKeyValueROTransaction;
|
||||
beginTransaction(type: 'readwrite'): AsyncKeyValueRWTransaction;
|
||||
}
|
||||
/**
|
||||
* Configuration options for the IndexedDB file system.
|
||||
*/
|
||||
export interface IndexedDBFileSystemOptions {
|
||||
storeName?: string;
|
||||
cacheSize?: number;
|
||||
}
|
||||
/**
|
||||
* A file system that uses the IndexedDB key value file system.
|
||||
*/
|
||||
export default class IndexedDBFileSystem extends AsyncKeyValueFileSystem {
|
||||
static readonly Name = "IndexedDB";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Constructs an IndexedDB file system with the given options.
|
||||
*/
|
||||
static Create(opts: IndexedDBFileSystemOptions, cb: BFSCallback<IndexedDBFileSystem>): void;
|
||||
static isAvailable(): boolean;
|
||||
private constructor();
|
||||
}
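A configuration sketch for `IndexedDBFileSystemOptions`; the `storeName` value is illustrative, and `"IndexedDB"` as the `fs` string assumes the backend is registered under its `Name`.

```javascript
BrowserFS.configure({
  fs: "IndexedDB",
  options: { storeName: "my-app-fs" } // cacheSize could also be set here
}, function (e) {
  if (e) {
    // IndexedDB may be unavailable (e.g. some private-browsing modes).
    console.error(e);
  }
});
```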
|
||||
263
sandpack-generated/static/browserfs11/node/backend/IndexedDB.js
Normal file
263
sandpack-generated/static/browserfs11/node/backend/IndexedDB.js
Normal file
@@ -0,0 +1,263 @@
"use strict";
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.IndexedDBStore = exports.IndexedDBRWTransaction = exports.IndexedDBROTransaction = void 0;
var key_value_filesystem_1 = require("../generic/key_value_filesystem");
var api_error_1 = require("../core/api_error");
var global_1 = require("../core/global");
var util_1 = require("../core/util");
/**
 * Get the indexedDB constructor for the current browser.
 * @hidden
 */
var indexedDB = global_1.default.indexedDB ||
    global_1.default.mozIndexedDB ||
    global_1.default.webkitIndexedDB ||
    global_1.default.msIndexedDB;
/**
 * Converts a DOMException or a DOMError from an IndexedDB event into a
 * standardized BrowserFS API error.
 * @hidden
 */
function convertError(e, message) {
    if (message === void 0) { message = e.toString(); }
    switch (e.name) {
        case "NotFoundError":
            return new api_error_1.ApiError(api_error_1.ErrorCode.ENOENT, message);
        case "QuotaExceededError":
            return new api_error_1.ApiError(api_error_1.ErrorCode.ENOSPC, message);
        default:
            // The rest do not seem to map cleanly to standard error codes.
            return new api_error_1.ApiError(api_error_1.ErrorCode.EIO, message);
    }
}
/**
 * Produces a new onerror handler for IDB. Our errors are always fatal, so we
 * handle them generically: Call the user-supplied callback with a translated
 * version of the error, and let the error bubble up.
 * @hidden
 */
function onErrorHandler(cb, code, message) {
    if (code === void 0) { code = api_error_1.ErrorCode.EIO; }
    if (message === void 0) { message = null; }
    return function (e) {
        // Prevent the error from canceling the transaction.
        e.preventDefault();
        cb(new api_error_1.ApiError(code, message !== null ? message : undefined));
    };
}
/**
 * @hidden
 */
var IndexedDBROTransaction = /** @class */ (function () {
    function IndexedDBROTransaction(tx, store) {
        this.tx = tx;
        this.store = store;
    }
    IndexedDBROTransaction.prototype.get = function (key, cb) {
        try {
            var r = this.store.get(key);
            r.onerror = onErrorHandler(cb);
            r.onsuccess = function (event) {
                // IDB returns the value 'undefined' when you try to get keys that
                // don't exist. The caller expects this behavior.
                var result = event.target.result;
                if (result === undefined) {
                    cb(null, result);
                }
                else {
                    // IDB data is stored as an ArrayBuffer
                    cb(null, (0, util_1.arrayBuffer2Buffer)(result));
                }
            };
        }
        catch (e) {
            cb(convertError(e));
        }
    };
    return IndexedDBROTransaction;
}());
exports.IndexedDBROTransaction = IndexedDBROTransaction;
/**
 * @hidden
 */
var IndexedDBRWTransaction = /** @class */ (function (_super) {
    __extends(IndexedDBRWTransaction, _super);
    function IndexedDBRWTransaction(tx, store) {
        return _super.call(this, tx, store) || this;
    }
    IndexedDBRWTransaction.prototype.put = function (key, data, overwrite, cb) {
        try {
            var arraybuffer = (0, util_1.buffer2ArrayBuffer)(data);
            var r = void 0;
            // Note: 'add' will never overwrite an existing key.
            r = overwrite ? this.store.put(arraybuffer, key) : this.store.add(arraybuffer, key);
            // XXX: NEED TO RETURN FALSE WHEN ADD HAS A KEY CONFLICT. NO ERROR.
            r.onerror = onErrorHandler(cb);
            r.onsuccess = function (event) {
                cb(null, true);
            };
        }
        catch (e) {
            cb(convertError(e));
        }
    };
    IndexedDBRWTransaction.prototype.del = function (key, cb) {
        try {
            // NOTE: IE8 has a bug with identifiers named 'delete' unless used as a string
            // like this.
            // http://stackoverflow.com/a/26479152
            var r = this.store['delete'](key);
            r.onerror = onErrorHandler(cb);
            r.onsuccess = function (event) {
                cb();
            };
        }
        catch (e) {
            cb(convertError(e));
        }
    };
    IndexedDBRWTransaction.prototype.commit = function (cb) {
        // Return to the event loop to commit the transaction.
        setTimeout(cb, 0);
    };
    IndexedDBRWTransaction.prototype.abort = function (cb) {
        var _e = null;
        try {
            this.tx.abort();
        }
        catch (e) {
            _e = convertError(e);
        }
        finally {
            cb(_e);
        }
    };
    return IndexedDBRWTransaction;
}(IndexedDBROTransaction));
exports.IndexedDBRWTransaction = IndexedDBRWTransaction;
var IndexedDBStore = /** @class */ (function () {
    function IndexedDBStore(db, storeName) {
        this.db = db;
        this.storeName = storeName;
    }
    IndexedDBStore.Create = function (storeName, cb) {
        var openReq = indexedDB.open(storeName, 1);
        openReq.onupgradeneeded = function (event) {
            var db = event.target.result;
            // Huh. This should never happen; we're at version 1. Why does another
            // database exist?
            if (db.objectStoreNames.contains(storeName)) {
                db.deleteObjectStore(storeName);
            }
            db.createObjectStore(storeName);
        };
        openReq.onsuccess = function (event) {
            cb(null, new IndexedDBStore(event.target.result, storeName));
        };
        openReq.onerror = onErrorHandler(cb, api_error_1.ErrorCode.EACCES);
    };
    IndexedDBStore.prototype.name = function () {
        return IndexedDBFileSystem.Name + " - " + this.storeName;
    };
    IndexedDBStore.prototype.clear = function (cb) {
        try {
            var tx = this.db.transaction(this.storeName, 'readwrite'), objectStore = tx.objectStore(this.storeName), r = objectStore.clear();
            r.onsuccess = function (event) {
                // Use setTimeout to commit transaction.
                setTimeout(cb, 0);
            };
            r.onerror = onErrorHandler(cb);
        }
        catch (e) {
            cb(convertError(e));
        }
    };
    IndexedDBStore.prototype.beginTransaction = function (type) {
        if (type === void 0) { type = 'readonly'; }
        var tx = this.db.transaction(this.storeName, type), objectStore = tx.objectStore(this.storeName);
        if (type === 'readwrite') {
            return new IndexedDBRWTransaction(tx, objectStore);
        }
        else if (type === 'readonly') {
            return new IndexedDBROTransaction(tx, objectStore);
        }
        else {
            throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid transaction type.');
        }
    };
    return IndexedDBStore;
}());
exports.IndexedDBStore = IndexedDBStore;
/**
 * A file system that uses the IndexedDB key value file system.
 */
var IndexedDBFileSystem = /** @class */ (function (_super) {
    __extends(IndexedDBFileSystem, _super);
    function IndexedDBFileSystem(cacheSize) {
        return _super.call(this, cacheSize) || this;
    }
    /**
     * Constructs an IndexedDB file system with the given options.
     */
    IndexedDBFileSystem.Create = function (opts, cb) {
        IndexedDBStore.Create(opts.storeName ? opts.storeName : 'browserfs', function (e, store) {
            if (store) {
                var idbfs_1 = new IndexedDBFileSystem(typeof (opts.cacheSize) === 'number' ? opts.cacheSize : 100);
                idbfs_1.init(store, function (e) {
                    if (e) {
                        cb(e);
                    }
                    else {
                        cb(null, idbfs_1);
                    }
                });
            }
            else {
                cb(e);
            }
        });
    };
    IndexedDBFileSystem.isAvailable = function () {
        // In Safari's private browsing mode, indexedDB.open returns NULL.
        // In Firefox, it throws an exception.
        // In Chrome, it "just works", and clears the database when you leave the page.
        // Untested: Opera, IE.
        try {
            return typeof indexedDB !== 'undefined' && null !== indexedDB.open("__browserfs_test__");
        }
        catch (e) {
            return false;
        }
    };
    IndexedDBFileSystem.Name = "IndexedDB";
    IndexedDBFileSystem.Options = {
        storeName: {
            type: "string",
            optional: true,
            description: "The name of this file system. You can have multiple IndexedDB file systems operating at once, but each must have a different name."
        },
        cacheSize: {
            type: "number",
            optional: true,
            description: "The size of the inode cache. Defaults to 100. A size of 0 or below disables caching."
        }
    };
    return IndexedDBFileSystem;
}(key_value_filesystem_1.AsyncKeyValueFileSystem));
exports.default = IndexedDBFileSystem;
//# sourceMappingURL=IndexedDB.js.map
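A hedged usage sketch for the IndexedDB backend above, using the `storeName` and `cacheSize` options declared in `IndexedDBFileSystem.Options`; the database name and file contents are examples, and `BrowserFS.configure`/`BrowserFS.BFSRequire` are assumed as the entry points.

```javascript
// Hypothetical configuration: persist files in an IndexedDB database named
// "my-app" with the default inode cache size.
BrowserFS.configure({
  fs: "IndexedDB",
  options: {
    storeName: "my-app",
    cacheSize: 100
  }
}, function (e) {
  if (e) {
    // e.g. an EACCES-style error when IndexedDB cannot be opened
    // (such as some private-browsing modes).
    throw e;
  }
  var fs = BrowserFS.BFSRequire("fs");
  fs.writeFile("/persisted.txt", "survives reloads", function (err) {
    if (err) { throw err; }
  });
});
```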
55
sandpack-generated/static/browserfs11/node/backend/IsoFS.d.ts
vendored
Normal file
@@ -0,0 +1,55 @@
/// <reference types="node" />
import { default as Stats } from '../core/node_fs_stats';
import { SynchronousFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
import { File } from '../core/file';
import { FileFlag } from '../core/file_flag';
/**
 * Options for IsoFS file system instances.
 */
export interface IsoFSOptions {
    data: Buffer;
    name?: string;
}
/**
 * Mounts an ISO file as a read-only file system.
 *
 * Supports:
 * * Vanilla ISO9660 ISOs
 * * Microsoft Joliet and Rock Ridge extensions to the ISO9660 standard
 */
export default class IsoFS extends SynchronousFileSystem implements FileSystem {
    static readonly Name = "IsoFS";
    static readonly Options: FileSystemOptions;
    /**
     * Creates an IsoFS instance with the given options.
     */
    static Create(opts: IsoFSOptions, cb: BFSCallback<IsoFS>): void;
    static isAvailable(): boolean;
    private _data;
    private _pvd;
    private _root;
    private _name;
    /**
     * **Deprecated. Please use IsoFS.Create() method instead.**
     *
     * Constructs a read-only file system from the given ISO.
     * @param data The ISO file in a buffer.
     * @param name The name of the ISO (optional; used for debug messages / identification via getName()).
     */
    private constructor();
    getName(): string;
    diskSpace(path: string, cb: (total: number, free: number) => void): void;
    isReadOnly(): boolean;
    supportsLinks(): boolean;
    supportsProps(): boolean;
    supportsSynch(): boolean;
    statSync(p: string, isLstat: boolean): Stats;
    openSync(p: string, flags: FileFlag, mode: number): File;
    readdirSync(path: string): string[];
    /**
     * Specially-optimized readfile.
     */
    readFileSync(fname: string, encoding: string, flag: FileFlag): any;
    private _getDirectoryRecord;
    private _getStats;
}
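A sketch of how IsoFS might be created from the declaration above, assuming the `BrowserFS.FileSystem.IsoFS.Create` entry point and a `Buffer` obtained via `BrowserFS.BFSRequire("buffer")`; the ISO URL is illustrative, and how the ISO bytes are fetched is up to the application.

```javascript
// Hypothetical usage: download an ISO image and mount it read-only.
// IsoFS.Create expects the raw ISO bytes in a Buffer (see IsoFSOptions).
var Buffer = BrowserFS.BFSRequire("buffer").Buffer;
fetch("/assets/disc.iso")
  .then(function (res) { return res.arrayBuffer(); })
  .then(function (ab) {
    BrowserFS.FileSystem.IsoFS.Create({
      data: Buffer.from(ab),  // wrap the downloaded bytes
      name: "disc.iso"        // optional; used for getName()/debugging
    }, function (e, isofs) {
      if (e) { throw e; }
      BrowserFS.initialize(isofs);
    });
  });
```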
1332
sandpack-generated/static/browserfs11/node/backend/IsoFS.js
Normal file
File diff suppressed because it is too large
109
sandpack-generated/static/browserfs11/node/backend/JSDelivrRequest.d.ts
vendored
Normal file
@@ -0,0 +1,109 @@
/// <reference types="node" />
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
import { FileFlag } from '../core/file_flag';
import { File } from '../core/file';
import Stats from '../core/node_fs_stats';
/**
 * Configuration options for a HTTPRequest file system.
 */
export interface JSDelivrRequestOptions {
    dependency: string;
    version: string;
    preferXHR?: boolean;
}
export type JSDelivrMeta = {
    files: JSDelivrMetaFile[];
};
export interface JSDelivrMetaFile {
    name: string;
    hash: string;
    time: string;
    size: number;
}
/**
 * A simple filesystem backed by HTTP downloads. You must create a directory listing using the
 * `make_http_index` tool provided by BrowserFS.
 *
 * If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
 * running `make_http_index` in your terminal in the directory you would like to index:
 *
 * ```
 * make_http_index > index.json
 * ```
 *
 * Listings objects look like the following:
 *
 * ```json
 * {
 *   "home": {
 *     "jvilk": {
 *       "someFile.txt": null,
 *       "someDir": {
 *         // Empty directory
 *       }
 *     }
 *   }
 * }
 * ```
 *
 * *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
 */
export default class JSDelivrRequest extends BaseFileSystem implements FileSystem {
    private dependency;
    private version;
    static readonly Name = "JSDelivrRequest";
    static readonly Options: FileSystemOptions;
    /**
     * Construct an HTTPRequest file system backend with the given options.
     */
    static Create(opts: JSDelivrRequestOptions, cb: BFSCallback<JSDelivrRequest>): void;
    static isAvailable(): boolean;
    readonly prefixUrl: string;
    private _index;
    private _requestFileAsyncInternal;
    private _requestFileSizeAsyncInternal;
    private _requestFileSyncInternal;
    private _requestFileSizeSyncInternal;
    private constructor();
    empty(): void;
    getName(): string;
    diskSpace(path: string, cb: (total: number, free: number) => void): void;
    isReadOnly(): boolean;
    supportsLinks(): boolean;
    supportsProps(): boolean;
    supportsSynch(): boolean;
    /**
     * Special HTTPFS function: Preload the given file into the index.
     * @param [String] path
     * @param [BrowserFS.Buffer] buffer
     */
    preloadFile(path: string, buffer: Buffer): void;
    stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
    statSync(path: string, isLstat: boolean): Stats;
    open(path: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
    openSync(path: string, flags: FileFlag, mode: number): File;
    readdir(path: string, cb: BFSCallback<string[]>): void;
    readdirSync(path: string): string[];
    /**
     * We have the entire file as a buffer; optimize readFile.
     */
    readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
    /**
     * Specially-optimized readfile.
     */
    readFileSync(fname: string, encoding: string, flag: FileFlag): any;
    private _getHTTPPath;
    /**
     * Asynchronously download the given file.
     */
    private _requestFileAsync;
    /**
     * Synchronously download the given file.
     */
    private _requestFileSync;
    /**
     * Only requests the HEAD content, for the file size.
     */
    private _requestFileSizeAsync;
    private _requestFileSizeSync;
}
@@ -0,0 +1,397 @@
"use strict";
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
var file_system_1 = require("../core/file_system");
var api_error_1 = require("../core/api_error");
var file_flag_1 = require("../core/file_flag");
var util_1 = require("../core/util");
var node_fs_stats_1 = require("../core/node_fs_stats");
var preload_file_1 = require("../generic/preload_file");
var xhr_1 = require("../generic/xhr");
var fetch_1 = require("../generic/fetch");
var file_index_1 = require("../generic/file_index");
/**
 * Try to convert the given buffer into a string, and pass it to the callback.
 * Optimization that removes the needed try/catch into a helper function, as
 * this is an uncommon case.
 * @hidden
 */
function tryToString(buff, encoding, cb) {
    try {
        cb(null, buff.toString(encoding));
    }
    catch (e) {
        cb(e);
    }
}
function syncNotAvailableError() {
    throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP, "Synchronous HTTP download methods are not available in this environment.");
}
/**
 * A simple filesystem backed by HTTP downloads. You must create a directory listing using the
 * `make_http_index` tool provided by BrowserFS.
 *
 * If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
 * running `make_http_index` in your terminal in the directory you would like to index:
 *
 * ```
 * make_http_index > index.json
 * ```
 *
 * Listings objects look like the following:
 *
 * ```json
 * {
 *   "home": {
 *     "jvilk": {
 *       "someFile.txt": null,
 *       "someDir": {
 *         // Empty directory
 *       }
 *     }
 *   }
 * }
 * ```
 *
 * *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
 */
var JSDelivrRequest = /** @class */ (function (_super) {
    __extends(JSDelivrRequest, _super);
    function JSDelivrRequest(meta, dependency, version, preferXHR) {
        if (preferXHR === void 0) { preferXHR = false; }
        var _this = _super.call(this) || this;
        _this.dependency = dependency;
        _this.version = version;
        _this._index = file_index_1.FileIndex.fromJSDelivr(meta);
        if (fetch_1.fetchIsAvailable && (!preferXHR || !xhr_1.xhrIsAvailable)) {
            _this._requestFileAsyncInternal = fetch_1.fetchFileAsync;
            _this._requestFileSizeAsyncInternal = fetch_1.fetchFileSizeAsync;
        }
        else {
            _this._requestFileAsyncInternal = xhr_1.asyncDownloadFile;
            _this._requestFileSizeAsyncInternal = xhr_1.getFileSizeAsync;
        }
        if (xhr_1.xhrIsAvailable) {
            _this._requestFileSyncInternal = xhr_1.syncDownloadFile;
            _this._requestFileSizeSyncInternal = xhr_1.getFileSizeSync;
        }
        else {
            _this._requestFileSyncInternal = syncNotAvailableError;
            _this._requestFileSizeSyncInternal = syncNotAvailableError;
        }
        return _this;
    }
    /**
     * Construct an HTTPRequest file system backend with the given options.
     */
    JSDelivrRequest.Create = function (opts, cb) {
        var URL = "https://data.jsdelivr.com/v1/package/npm/".concat(opts.dependency, "@").concat(opts.version, "/flat");
        (0, xhr_1.asyncDownloadFile)(URL, "json", function (e, data) {
            if (e) {
                cb(e);
            }
            else {
                cb(null, new JSDelivrRequest(data, opts.dependency, opts.version));
            }
        });
    };
    JSDelivrRequest.isAvailable = function () {
        return xhr_1.xhrIsAvailable || fetch_1.fetchIsAvailable;
    };
    JSDelivrRequest.prototype.empty = function () {
        this._index.fileIterator(function (file) {
            file.fileData = null;
        });
    };
    JSDelivrRequest.prototype.getName = function () {
        return JSDelivrRequest.Name;
    };
    JSDelivrRequest.prototype.diskSpace = function (path, cb) {
        // Read-only file system. We could calculate the total space, but that's not
        // important right now.
        cb(0, 0);
    };
    JSDelivrRequest.prototype.isReadOnly = function () {
        return true;
    };
    JSDelivrRequest.prototype.supportsLinks = function () {
        return false;
    };
    JSDelivrRequest.prototype.supportsProps = function () {
        return false;
    };
    JSDelivrRequest.prototype.supportsSynch = function () {
        // Synchronous operations are only available via the XHR interface for now.
        return xhr_1.xhrIsAvailable;
    };
    /**
     * Special HTTPFS function: Preload the given file into the index.
     * @param [String] path
     * @param [BrowserFS.Buffer] buffer
     */
    JSDelivrRequest.prototype.preloadFile = function (path, buffer) {
        var inode = this._index.getInode(path);
        if ((0, file_index_1.isFileInode)(inode)) {
            if (inode === null) {
                throw api_error_1.ApiError.ENOENT(path);
            }
            var stats = inode.getData();
            stats.size = buffer.length;
            stats.fileData = buffer;
        }
        else {
            throw api_error_1.ApiError.EISDIR(path);
        }
    };
    JSDelivrRequest.prototype.stat = function (path, isLstat, cb) {
        var inode = this._index.getInode(path);
        if (inode === null) {
            return cb(api_error_1.ApiError.ENOENT(path));
        }
        var stats;
        if ((0, file_index_1.isFileInode)(inode)) {
            stats = inode.getData();
            // At this point, a non-opened file will still have default stats from the listing.
            if (stats.size < 0) {
                this._requestFileSizeAsync(path, function (e, size) {
                    if (e) {
                        return cb(e);
                    }
                    stats.size = size;
                    cb(null, node_fs_stats_1.default.clone(stats));
                });
            }
            else {
                cb(null, node_fs_stats_1.default.clone(stats));
            }
        }
        else if ((0, file_index_1.isDirInode)(inode)) {
            stats = inode.getStats();
            cb(null, stats);
        }
        else {
            cb(api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path));
        }
    };
    JSDelivrRequest.prototype.statSync = function (path, isLstat) {
        var inode = this._index.getInode(path);
        if (inode === null) {
            throw api_error_1.ApiError.ENOENT(path);
        }
        var stats;
        if ((0, file_index_1.isFileInode)(inode)) {
            stats = inode.getData();
            // At this point, a non-opened file will still have default stats from the listing.
            if (stats.size < 0) {
                stats.size = this._requestFileSizeSync(path);
            }
        }
        else if ((0, file_index_1.isDirInode)(inode)) {
            stats = inode.getStats();
        }
        else {
            throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path);
        }
        return stats;
    };
    JSDelivrRequest.prototype.open = function (path, flags, mode, cb) {
        // INVARIANT: You can't write to files on this file system.
        if (flags.isWriteable()) {
            return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path));
        }
        var self = this;
        // Check if the path exists, and is a file.
        var inode = this._index.getInode(path);
        if (inode === null) {
            return cb(api_error_1.ApiError.ENOENT(path));
        }
        if ((0, file_index_1.isFileInode)(inode)) {
            var stats_1 = inode.getData();
            switch (flags.pathExistsAction()) {
                case file_flag_1.ActionType.THROW_EXCEPTION:
                case file_flag_1.ActionType.TRUNCATE_FILE:
                    return cb(api_error_1.ApiError.EEXIST(path));
                case file_flag_1.ActionType.NOP:
                    // Use existing file contents.
                    // XXX: Uh, this maintains the previously-used flag.
                    if (stats_1.fileData) {
                        return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), stats_1.fileData));
                    }
                    // @todo be lazier about actually requesting the file
                    this._requestFileAsync(path, 'buffer', function (err, buffer) {
                        if (err) {
                            return cb(err);
                        }
                        // we don't initially have file sizes
                        stats_1.size = buffer.length;
                        stats_1.fileData = buffer;
                        return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), buffer));
                    });
                    break;
                default:
                    return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.'));
            }
        }
        else {
            return cb(api_error_1.ApiError.EISDIR(path));
        }
    };
    JSDelivrRequest.prototype.openSync = function (path, flags, mode) {
        // INVARIANT: You can't write to files on this file system.
        if (flags.isWriteable()) {
            throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
        }
        // Check if the path exists, and is a file.
        var inode = this._index.getInode(path);
        if (inode === null) {
            throw api_error_1.ApiError.ENOENT(path);
        }
        if ((0, file_index_1.isFileInode)(inode)) {
            var stats = inode.getData();
            switch (flags.pathExistsAction()) {
                case file_flag_1.ActionType.THROW_EXCEPTION:
                case file_flag_1.ActionType.TRUNCATE_FILE:
                    throw api_error_1.ApiError.EEXIST(path);
                case file_flag_1.ActionType.NOP:
                    // Use existing file contents.
                    // XXX: Uh, this maintains the previously-used flag.
                    if (stats.fileData) {
                        return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), stats.fileData);
                    }
                    // @todo be lazier about actually requesting the file
                    var buffer = this._requestFileSync(path, 'buffer');
                    // we don't initially have file sizes
                    stats.size = buffer.length;
                    stats.fileData = buffer;
                    return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), buffer);
                default:
                    throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.');
            }
        }
        else {
            throw api_error_1.ApiError.EISDIR(path);
        }
    };
    JSDelivrRequest.prototype.readdir = function (path, cb) {
        try {
            cb(null, this.readdirSync(path));
        }
        catch (e) {
            cb(e);
        }
    };
    JSDelivrRequest.prototype.readdirSync = function (path) {
        // Check if it exists.
        var inode = this._index.getInode(path);
        if (inode === null) {
            throw api_error_1.ApiError.ENOENT(path);
        }
        else if ((0, file_index_1.isDirInode)(inode)) {
            return inode.getListing();
        }
        else {
            throw api_error_1.ApiError.ENOTDIR(path);
        }
    };
    /**
     * We have the entire file as a buffer; optimize readFile.
     */
    JSDelivrRequest.prototype.readFile = function (fname, encoding, flag, cb) {
        // Wrap cb in file closing code.
        var oldCb = cb;
        // Get file.
        this.open(fname, flag, 0x1a4, function (err, fd) {
            if (err) {
                return cb(err);
            }
            cb = function (err, arg) {
                fd.close(function (err2) {
                    if (!err) {
                        err = err2;
                    }
                    return oldCb(err, arg);
                });
            };
            var fdCast = fd;
            var fdBuff = fdCast.getBuffer();
            if (encoding === null) {
                cb(err, (0, util_1.copyingSlice)(fdBuff));
            }
            else {
                tryToString(fdBuff, encoding, cb);
            }
        });
    };
    /**
     * Specially-optimized readfile.
     */
    JSDelivrRequest.prototype.readFileSync = function (fname, encoding, flag) {
        // Get file.
        var fd = this.openSync(fname, flag, 0x1a4);
        try {
            var fdCast = fd;
            var fdBuff = fdCast.getBuffer();
            if (encoding === null) {
                return (0, util_1.copyingSlice)(fdBuff);
            }
            return fdBuff.toString(encoding);
        }
        finally {
            fd.closeSync();
        }
    };
    JSDelivrRequest.prototype._getHTTPPath = function (filePath) {
        if (filePath.charAt(0) === '/') {
            filePath = filePath.slice(1);
        }
        return "https://cdn.jsdelivr.net/npm/".concat(this.dependency, "@").concat(this.version, "/").concat(filePath);
    };
    JSDelivrRequest.prototype._requestFileAsync = function (p, type, cb) {
        this._requestFileAsyncInternal(this._getHTTPPath(p), type, cb);
    };
    JSDelivrRequest.prototype._requestFileSync = function (p, type) {
        return this._requestFileSyncInternal(this._getHTTPPath(p), type);
    };
    /**
     * Only requests the HEAD content, for the file size.
     */
    JSDelivrRequest.prototype._requestFileSizeAsync = function (path, cb) {
        this._requestFileSizeAsyncInternal(this._getHTTPPath(path), cb);
    };
    JSDelivrRequest.prototype._requestFileSizeSync = function (path) {
        return this._requestFileSizeSyncInternal(this._getHTTPPath(path));
    };
    JSDelivrRequest.Name = "JSDelivrRequest";
    JSDelivrRequest.Options = {
        dependency: {
            type: "string",
            description: "Name of dependency"
        },
        version: {
            type: "string",
            description: "Version of dependency, has to be absolute"
        },
        preferXHR: {
            type: "boolean",
            optional: true,
            description: "Whether to prefer XmlHttpRequest or fetch for async operations if both are available. Default: false"
        }
    };
    return JSDelivrRequest;
}(file_system_1.BaseFileSystem));
exports.default = JSDelivrRequest;
//# sourceMappingURL=JSDelivrRequest.js.map
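An illustration of the two URL shapes used by the backend above, taken from `JSDelivrRequest.Create` and `_getHTTPPath`; the package name and version are example values only.

```javascript
// The metadata endpoint returns the flat file listing that seeds the index;
// individual files are then fetched from the jsDelivr CDN on demand.
var dependency = "left-pad"; // example values only
var version = "1.3.0";
var metaUrl = "https://data.jsdelivr.com/v1/package/npm/" + dependency + "@" + version + "/flat";
function fileUrl(filePath) {
  // Mirrors _getHTTPPath: strip a leading slash, then append to the CDN prefix.
  if (filePath.charAt(0) === '/') {
    filePath = filePath.slice(1);
  }
  return "https://cdn.jsdelivr.net/npm/" + dependency + "@" + version + "/" + filePath;
}
console.log(metaUrl);              // listing of every file in the package
console.log(fileUrl("/index.js")); // CDN URL for one file in the package
```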
31
sandpack-generated/static/browserfs11/node/backend/LocalStorage.d.ts
vendored
Normal file
@@ -0,0 +1,31 @@
/// <reference types="node" />
import { BFSCallback, FileSystemOptions } from '../core/file_system';
import { SyncKeyValueStore, SimpleSyncStore, SyncKeyValueFileSystem, SyncKeyValueRWTransaction } from '../generic/key_value_filesystem';
/**
 * A synchronous key-value store backed by localStorage.
 */
export declare class LocalStorageStore implements SyncKeyValueStore, SimpleSyncStore {
    name(): string;
    clear(): void;
    beginTransaction(type: string): SyncKeyValueRWTransaction;
    get(key: string): Buffer | undefined;
    put(key: string, data: Buffer, overwrite: boolean): boolean;
    del(key: string): void;
}
/**
 * A synchronous file system backed by localStorage. Connects our
 * LocalStorageStore to our SyncKeyValueFileSystem.
 */
export default class LocalStorageFileSystem extends SyncKeyValueFileSystem {
    static readonly Name = "LocalStorage";
    static readonly Options: FileSystemOptions;
    /**
     * Creates a LocalStorageFileSystem instance.
     */
    static Create(options: any, cb: BFSCallback<LocalStorageFileSystem>): void;
    static isAvailable(): boolean;
    /**
     * Creates a new LocalStorage file system using the contents of `localStorage`.
     */
    private constructor();
}
@@ -0,0 +1,122 @@
"use strict";
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.LocalStorageStore = void 0;
var key_value_filesystem_1 = require("../generic/key_value_filesystem");
var api_error_1 = require("../core/api_error");
var global_1 = require("../core/global");
/**
 * Some versions of FF and all versions of IE do not support the full range of
 * 16-bit numbers encoded as characters, as they enforce UTF-16 restrictions.
 * @url http://stackoverflow.com/questions/11170716/are-there-any-characters-that-are-not-allowed-in-localstorage/11173673#11173673
 * @hidden
 */
var supportsBinaryString = false, binaryEncoding;
try {
    global_1.default.localStorage.setItem("__test__", String.fromCharCode(0xD800));
    supportsBinaryString = global_1.default.localStorage.getItem("__test__") === String.fromCharCode(0xD800);
}
catch (e) {
    // IE throws an exception.
    supportsBinaryString = false;
}
binaryEncoding = supportsBinaryString ? 'binary_string' : 'binary_string_ie';
if (!Buffer.isEncoding(binaryEncoding)) {
    // Fallback for non BrowserFS implementations of buffer that lack a
    // binary_string format.
    binaryEncoding = "base64";
}
/**
 * A synchronous key-value store backed by localStorage.
 */
var LocalStorageStore = /** @class */ (function () {
    function LocalStorageStore() {
    }
    LocalStorageStore.prototype.name = function () {
        return LocalStorageFileSystem.Name;
    };
    LocalStorageStore.prototype.clear = function () {
        global_1.default.localStorage.clear();
    };
    LocalStorageStore.prototype.beginTransaction = function (type) {
        // No need to differentiate.
        return new key_value_filesystem_1.SimpleSyncRWTransaction(this);
    };
    LocalStorageStore.prototype.get = function (key) {
        try {
            var data = global_1.default.localStorage.getItem(key);
            if (data !== null) {
                return Buffer.from(data, binaryEncoding);
            }
        }
        catch (e) {
            // Do nothing.
        }
        // Key doesn't exist, or a failure occurred.
        return undefined;
    };
    LocalStorageStore.prototype.put = function (key, data, overwrite) {
        try {
            if (!overwrite && global_1.default.localStorage.getItem(key) !== null) {
                // Don't want to overwrite the key!
                return false;
            }
            global_1.default.localStorage.setItem(key, data.toString(binaryEncoding));
            return true;
        }
        catch (e) {
            throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOSPC, "LocalStorage is full.");
        }
    };
    LocalStorageStore.prototype.del = function (key) {
        try {
            global_1.default.localStorage.removeItem(key);
        }
        catch (e) {
            throw new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "Unable to delete key " + key + ": " + e);
        }
    };
    return LocalStorageStore;
}());
exports.LocalStorageStore = LocalStorageStore;
/**
 * A synchronous file system backed by localStorage. Connects our
 * LocalStorageStore to our SyncKeyValueFileSystem.
 */
var LocalStorageFileSystem = /** @class */ (function (_super) {
    __extends(LocalStorageFileSystem, _super);
    /**
     * Creates a new LocalStorage file system using the contents of `localStorage`.
     */
    function LocalStorageFileSystem() {
        return _super.call(this, { store: new LocalStorageStore() }) || this;
    }
    /**
     * Creates a LocalStorageFileSystem instance.
     */
    LocalStorageFileSystem.Create = function (options, cb) {
        cb(null, new LocalStorageFileSystem());
    };
    LocalStorageFileSystem.isAvailable = function () {
        return typeof global_1.default.localStorage !== 'undefined';
    };
    LocalStorageFileSystem.Name = "LocalStorage";
    LocalStorageFileSystem.Options = {};
    return LocalStorageFileSystem;
}(key_value_filesystem_1.SyncKeyValueFileSystem));
exports.default = LocalStorageFileSystem;
//# sourceMappingURL=LocalStorage.js.map
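A standalone sketch of the encoding probe performed at module load above: store a lone surrogate code unit and check whether it round-trips through localStorage intact. The `binary_string` encodings exist only in BrowserFS's bundled Buffer implementation, so this sketch falls back to base64 for plain environments.

```javascript
// Decide how Buffer contents should be serialized into localStorage values.
function detectLocalStorageEncoding() {
  var probe = String.fromCharCode(0xD800); // unpaired surrogate
  var supportsBinaryString = false;
  try {
    localStorage.setItem("__test__", probe);
    supportsBinaryString = localStorage.getItem("__test__") === probe;
  } catch (e) {
    // Some browsers throw instead of storing the value.
    supportsBinaryString = false;
  }
  // 'binary_string' packs two bytes per character; base64 is larger but
  // works with any standards-compliant Buffer implementation.
  return supportsBinaryString ? "binary_string" : "base64";
}
console.log(detectLocalStorageEncoding());
```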
109
sandpack-generated/static/browserfs11/node/backend/MountableFileSystem.d.ts
vendored
Normal file
@@ -0,0 +1,109 @@
import { FileSystem, BaseFileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
import { ApiError } from '../core/api_error';
/**
 * Configuration options for the MountableFileSystem backend.
 */
export interface MountableFileSystemOptions {
    [mountPoint: string]: FileSystem;
}
/**
 * The MountableFileSystem allows you to mount multiple backend types or
 * multiple instantiations of the same backend into a single file system tree.
 * The file systems do not need to know about each other; all interactions are
 * automatically facilitated through this interface.
 *
 * For example, if a file system is mounted at /mnt/blah, and a request came in
 * for /mnt/blah/foo.txt, the file system would see a request for /foo.txt.
 *
 * You can mount file systems when you configure the file system:
 * ```javascript
 * BrowserFS.configure({
 *   fs: "MountableFileSystem",
 *   options: {
 *     '/data': { fs: 'HTTPRequest', options: { index: "http://mysite.com/files/index.json" } },
 *     '/home': { fs: 'LocalStorage' }
 *   }
 * }, function(e) {
 *
 * });
 * ```
 *
 * For advanced users, you can also mount file systems *after* MFS is constructed:
 * ```javascript
 * BrowserFS.FileSystem.HTTPRequest.Create({
 *   index: "http://mysite.com/files/index.json"
 * }, function(e, xhrfs) {
 *   BrowserFS.FileSystem.MountableFileSystem.Create({
 *     '/data': xhrfs
 *   }, function(e, mfs) {
 *     BrowserFS.initialize(mfs);
 *
 *     // Added after-the-fact...
 *     BrowserFS.FileSystem.LocalStorage.Create(function(e, lsfs) {
 *       mfs.mount('/home', lsfs);
 *     });
 *   });
 * });
 * ```
 *
 * Since MountableFileSystem simply proxies requests to mounted file systems, it supports all of the operations that the mounted file systems support.
 *
 * With no mounted file systems, `MountableFileSystem` acts as a simple `InMemory` filesystem.
 */
export default class MountableFileSystem extends BaseFileSystem implements FileSystem {
    static readonly Name = "MountableFileSystem";
    static readonly Options: FileSystemOptions;
    /**
     * Creates a MountableFileSystem instance with the given options.
     */
    static Create(opts: MountableFileSystemOptions, cb: BFSCallback<MountableFileSystem>): void;
    static isAvailable(): boolean;
    private mntMap;
    private mountList;
    private rootFs;
    /**
     * Creates a new, empty MountableFileSystem.
     */
    private constructor();
    /**
     * Mounts the file system at the given mount point.
     */
    mount(mountPoint: string, fs: FileSystem): void;
    umount(mountPoint: string): void;
    /**
     * Returns the file system that the path points to.
     */
    _getFs(path: string): {
        fs: FileSystem;
        path: string;
        mountPoint: string;
    };
    getName(): string;
    diskSpace(path: string, cb: (total: number, free: number) => void): void;
    isReadOnly(): boolean;
    supportsLinks(): boolean;
    supportsProps(): boolean;
    supportsSynch(): boolean;
    /**
     * Fixes up error messages so they mention the mounted file location relative
     * to the MFS root, not to the particular FS's root.
     * Mutates the input error, and returns it.
     */
    standardizeError(err: ApiError, path: string, realPath: string): ApiError;
    rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
    renameSync(oldPath: string, newPath: string): void;
    readdirSync(p: string): string[];
    readdir(p: string, cb: BFSCallback<string[]>): void;
    realpathSync(p: string, cache: {
        [path: string]: string;
    }): string;
    realpath(p: string, cache: {
        [path: string]: string;
    }, cb: BFSCallback<string>): void;
    rmdirSync(p: string): void;
    rmdir(p: string, cb: BFSOneArgCallback): void;
    /**
     * Returns true if the given path contains a mount point.
     */
    private _containsMountPt;
}
@@ -0,0 +1,454 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var InMemory_1 = require("./InMemory");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
var path = require("path");
|
||||
var util_1 = require("../core/util");
|
||||
/**
|
||||
* The MountableFileSystem allows you to mount multiple backend types or
|
||||
* multiple instantiations of the same backend into a single file system tree.
|
||||
* The file systems do not need to know about each other; all interactions are
|
||||
* automatically facilitated through this interface.
|
||||
*
|
||||
* For example, if a file system is mounted at /mnt/blah, and a request came in
|
||||
* for /mnt/blah/foo.txt, the file system would see a request for /foo.txt.
|
||||
*
|
||||
* You can mount file systems when you configure the file system:
|
||||
* ```javascript
|
||||
* BrowserFS.configure({
|
||||
* fs: "MountableFileSystem",
|
||||
* options: {
|
||||
* '/data': { fs: 'HTTPRequest', options: { index: "http://mysite.com/files/index.json" } },
|
||||
* '/home': { fs: 'LocalStorage' }
|
||||
* }
|
||||
* }, function(e) {
|
||||
*
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* For advanced users, you can also mount file systems *after* MFS is constructed:
|
||||
* ```javascript
|
||||
* BrowserFS.FileSystem.HTTPRequest.Create({
|
||||
* index: "http://mysite.com/files/index.json"
|
||||
* }, function(e, xhrfs) {
|
||||
* BrowserFS.FileSystem.MountableFileSystem.Create({
|
||||
* '/data': xhrfs
|
||||
* }, function(e, mfs) {
|
||||
* BrowserFS.initialize(mfs);
|
||||
*
|
||||
* // Added after-the-fact...
|
||||
* BrowserFS.FileSystem.LocalStorage.Create(function(e, lsfs) {
|
||||
* mfs.mount('/home', lsfs);
|
||||
* });
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Since MountableFileSystem simply proxies requests to mounted file systems, it supports all of the operations that the mounted file systems support.
|
||||
*
|
||||
* With no mounted file systems, `MountableFileSystem` acts as a simple `InMemory` filesystem.
|
||||
*/
|
||||
var MountableFileSystem = /** @class */ (function (_super) {
|
||||
__extends(MountableFileSystem, _super);
|
||||
/**
|
||||
* Creates a new, empty MountableFileSystem.
|
||||
*/
|
||||
function MountableFileSystem(rootFs) {
|
||||
var _this = _super.call(this) || this;
|
||||
// Contains the list of mount points in mntMap, sorted by string length in decreasing order.
|
||||
// Ensures that we scan the most specific mount points for a match first, which lets us
|
||||
// nest mount points.
|
||||
_this.mountList = [];
|
||||
_this.mntMap = {};
|
||||
_this.rootFs = rootFs;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Creates a MountableFileSystem instance with the given options.
|
||||
*/
|
||||
MountableFileSystem.Create = function (opts, cb) {
|
||||
InMemory_1.default.Create({}, function (e, imfs) {
|
||||
if (imfs) {
|
||||
var fs_1 = new MountableFileSystem(imfs);
|
||||
try {
|
||||
Object.keys(opts).forEach(function (mountPoint) {
|
||||
fs_1.mount(mountPoint, opts[mountPoint]);
|
||||
});
|
||||
}
|
||||
catch (e) {
|
||||
return cb(e);
|
||||
}
|
||||
cb(null, fs_1);
|
||||
}
|
||||
else {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
MountableFileSystem.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Mounts the file system at the given mount point.
|
||||
*/
|
||||
MountableFileSystem.prototype.mount = function (mountPoint, fs) {
|
||||
if (mountPoint[0] !== '/') {
|
||||
mountPoint = "/".concat(mountPoint);
|
||||
}
|
||||
mountPoint = path.resolve(mountPoint);
|
||||
if (this.mntMap[mountPoint]) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Mount point " + mountPoint + " is already taken.");
|
||||
}
|
||||
(0, util_1.mkdirpSync)(mountPoint, 0x1ff, this.rootFs);
|
||||
this.mntMap[mountPoint] = fs;
|
||||
this.mountList.push(mountPoint);
|
||||
this.mountList = this.mountList.sort(function (a, b) { return b.length - a.length; });
|
||||
};
|
||||
MountableFileSystem.prototype.umount = function (mountPoint) {
|
||||
if (mountPoint[0] !== '/') {
|
||||
mountPoint = "/".concat(mountPoint);
|
||||
}
|
||||
mountPoint = path.resolve(mountPoint);
|
||||
if (!this.mntMap[mountPoint]) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Mount point " + mountPoint + " is already unmounted.");
|
||||
}
|
||||
delete this.mntMap[mountPoint];
|
||||
this.mountList.splice(this.mountList.indexOf(mountPoint), 1);
|
||||
while (mountPoint !== '/') {
|
||||
if (this.rootFs.readdirSync(mountPoint).length === 0) {
|
||||
this.rootFs.rmdirSync(mountPoint);
|
||||
mountPoint = path.dirname(mountPoint);
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Returns the file system that the path points to.
|
||||
*/
|
||||
MountableFileSystem.prototype._getFs = function (path) {
|
||||
var mountList = this.mountList, len = mountList.length;
|
||||
for (var i = 0; i < len; i++) {
|
||||
var mountPoint = mountList[i];
|
||||
// We know path is normalized, so it is a substring of the mount point.
|
||||
if (mountPoint.length <= path.length && path.indexOf(mountPoint) === 0) {
|
||||
path = path.substr(mountPoint.length > 1 ? mountPoint.length : 0);
|
||||
if (path === '') {
|
||||
path = '/';
|
||||
}
|
||||
return { fs: this.mntMap[mountPoint], path: path, mountPoint: mountPoint };
|
||||
}
|
||||
}
|
||||
// Query our root file system.
|
||||
return { fs: this.rootFs, path: path, mountPoint: '/' };
|
||||
};
|
||||
// Global information methods
|
||||
MountableFileSystem.prototype.getName = function () {
|
||||
return MountableFileSystem.Name;
|
||||
};
|
||||
MountableFileSystem.prototype.diskSpace = function (path, cb) {
|
||||
cb(0, 0);
|
||||
};
|
||||
MountableFileSystem.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
MountableFileSystem.prototype.supportsLinks = function () {
|
||||
// I'm not ready for cross-FS links yet.
|
||||
return false;
|
||||
};
|
||||
MountableFileSystem.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
MountableFileSystem.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Fixes up error messages so they mention the mounted file location relative
|
||||
* to the MFS root, not to the particular FS's root.
|
||||
* Mutates the input error, and returns it.
|
||||
*/
|
||||
MountableFileSystem.prototype.standardizeError = function (err, path, realPath) {
|
||||
var index = err.message.indexOf(path);
|
||||
if (index !== -1) {
|
||||
err.message = err.message.substr(0, index) + realPath + err.message.substr(index + path.length);
|
||||
err.path = realPath;
|
||||
}
|
||||
return err;
|
||||
};
|
||||
// The following methods involve multiple file systems, and thus have custom
|
||||
// logic.
|
||||
// Note that we go through the Node API to use its robust default argument
|
||||
// processing.
|
||||
MountableFileSystem.prototype.rename = function (oldPath, newPath, cb) {
|
||||
var _this = this;
|
||||
// Scenario 1: old and new are on same FS.
|
||||
var fs1rv = this._getFs(oldPath);
|
||||
var fs2rv = this._getFs(newPath);
|
||||
if (fs1rv.fs === fs2rv.fs) {
|
||||
return fs1rv.fs.rename(fs1rv.path, fs2rv.path, function (e) {
|
||||
if (e) {
|
||||
_this.standardizeError(_this.standardizeError(e, fs1rv.path, oldPath), fs2rv.path, newPath);
|
||||
}
|
||||
cb(e);
|
||||
});
|
||||
}
|
||||
// Scenario 2: Different file systems.
|
||||
// Read old file, write new file, delete old file.
|
||||
return node_fs_1.default.readFile(oldPath, function (err, data) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
node_fs_1.default.writeFile(newPath, data, function (err) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
node_fs_1.default.unlink(oldPath, cb);
|
||||
});
|
||||
});
|
||||
};
|
||||
MountableFileSystem.prototype.renameSync = function (oldPath, newPath) {
|
||||
// Scenario 1: old and new are on same FS.
|
||||
var fs1rv = this._getFs(oldPath);
|
||||
var fs2rv = this._getFs(newPath);
|
||||
if (fs1rv.fs === fs2rv.fs) {
|
||||
try {
|
||||
return fs1rv.fs.renameSync(fs1rv.path, fs2rv.path);
|
||||
}
|
||||
catch (e) {
|
||||
this.standardizeError(this.standardizeError(e, fs1rv.path, oldPath), fs2rv.path, newPath);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
// Scenario 2: Different file systems.
|
||||
var data = node_fs_1.default.readFileSync(oldPath);
|
||||
node_fs_1.default.writeFileSync(newPath, data);
|
||||
return node_fs_1.default.unlinkSync(oldPath);
|
||||
};
|
||||
MountableFileSystem.prototype.readdirSync = function (p) {
|
||||
var fsInfo = this._getFs(p);
|
||||
// If null, rootfs did not have the directory
|
||||
// (or the target FS is the root fs).
|
||||
var rv = null;
|
||||
// Mount points are all defined in the root FS.
|
||||
// Ensure that we list those, too.
|
||||
if (fsInfo.fs !== this.rootFs) {
|
||||
try {
|
||||
rv = this.rootFs.readdirSync(p);
|
||||
}
|
||||
catch (e) {
|
||||
// Ignore.
|
||||
}
|
||||
}
|
||||
try {
|
||||
var rv2_1 = fsInfo.fs.readdirSync(fsInfo.path);
|
||||
if (rv === null) {
|
||||
return rv2_1;
|
||||
}
|
||||
else {
|
||||
// Filter out duplicates.
|
||||
return rv2_1.concat(rv.filter(function (val) { return rv2_1.indexOf(val) === -1; }));
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (rv === null) {
|
||||
throw this.standardizeError(e, fsInfo.path, p);
|
||||
}
|
||||
else {
|
||||
// The root FS had something.
|
||||
return rv;
|
||||
}
|
||||
}
|
||||
};
|
||||
MountableFileSystem.prototype.readdir = function (p, cb) {
|
||||
var _this = this;
|
||||
var fsInfo = this._getFs(p);
|
||||
fsInfo.fs.readdir(fsInfo.path, function (err, files) {
|
||||
if (fsInfo.fs !== _this.rootFs) {
|
||||
try {
|
||||
var rv = _this.rootFs.readdirSync(p);
|
||||
if (files) {
|
||||
// Filter out duplicates.
|
||||
files = files.concat(rv.filter(function (val) { return files.indexOf(val) === -1; }));
|
||||
}
|
||||
else {
|
||||
files = rv;
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
// Root FS and target FS did not have directory.
|
||||
if (err) {
|
||||
return cb(_this.standardizeError(err, fsInfo.path, p));
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (err) {
|
||||
// Root FS and target FS are the same, and did not have directory.
|
||||
return cb(_this.standardizeError(err, fsInfo.path, p));
|
||||
}
|
||||
cb(null, files);
|
||||
});
|
||||
};
|
||||
MountableFileSystem.prototype.realpathSync = function (p, cache) {
|
||||
var fsInfo = this._getFs(p);
|
||||
try {
|
||||
var mountedPath = fsInfo.fs.realpathSync(fsInfo.path, {});
|
||||
// resolve is there to remove any trailing slash that may be present
|
||||
return path.resolve(path.join(fsInfo.mountPoint, mountedPath));
|
||||
}
|
||||
catch (e) {
|
||||
throw this.standardizeError(e, fsInfo.path, p);
|
||||
}
|
||||
};
|
||||
MountableFileSystem.prototype.realpath = function (p, cache, cb) {
|
||||
var _this = this;
|
||||
var fsInfo = this._getFs(p);
|
||||
fsInfo.fs.realpath(fsInfo.path, {}, function (err, rv) {
|
||||
if (err) {
|
||||
cb(_this.standardizeError(err, fsInfo.path, p));
|
||||
}
|
||||
else {
|
||||
// resolve is there to remove any trailing slash that may be present
|
||||
cb(null, path.resolve(path.join(fsInfo.mountPoint, rv)));
|
||||
}
|
||||
});
|
||||
};
|
||||
MountableFileSystem.prototype.rmdirSync = function (p) {
|
||||
var fsInfo = this._getFs(p);
|
||||
if (this._containsMountPt(p)) {
|
||||
throw api_error_1.ApiError.ENOTEMPTY(p);
|
||||
}
|
||||
else {
|
||||
try {
|
||||
fsInfo.fs.rmdirSync(fsInfo.path);
|
||||
}
|
||||
catch (e) {
|
||||
throw this.standardizeError(e, fsInfo.path, p);
|
||||
}
|
||||
}
|
||||
};
|
||||
MountableFileSystem.prototype.rmdir = function (p, cb) {
|
||||
var _this = this;
|
||||
var fsInfo = this._getFs(p);
|
||||
if (this._containsMountPt(p)) {
|
||||
cb(api_error_1.ApiError.ENOTEMPTY(p));
|
||||
}
|
||||
else {
|
||||
fsInfo.fs.rmdir(fsInfo.path, function (err) {
|
||||
cb(err ? _this.standardizeError(err, fsInfo.path, p) : null);
|
||||
});
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Returns true if the given path contains a mount point.
|
||||
*/
|
||||
MountableFileSystem.prototype._containsMountPt = function (p) {
|
||||
var mountPoints = this.mountList, len = mountPoints.length;
|
||||
for (var i = 0; i < len; i++) {
|
||||
var pt = mountPoints[i];
|
||||
if (pt.length >= p.length && pt.slice(0, p.length) === p) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
MountableFileSystem.Name = "MountableFileSystem";
|
||||
MountableFileSystem.Options = {};
|
||||
return MountableFileSystem;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = MountableFileSystem;
|
||||
/**
 * Tricky: Define all of the functions that merely forward arguments to the
 * relevant file system, or return/throw an error.
 * Take advantage of the fact that the *first* argument is always the path, and
 * the *last* is the callback function (if async).
 * @todo Can use numArgs to make proxying more efficient.
 * @hidden
 */
function defineFcn(name, isSync, numArgs) {
    if (isSync) {
        return function () {
            var args = [];
            for (var _i = 0; _i < arguments.length; _i++) {
                args[_i] = arguments[_i];
            }
            var path = args[0];
            var rv = this._getFs(path);
            args[0] = rv.path;
            try {
                return rv.fs[name].apply(rv.fs, args);
            }
            catch (e) {
                this.standardizeError(e, rv.path, path);
                throw e;
            }
        };
    }
    else {
        return function () {
            var _this = this;
            var args = [];
            for (var _i = 0; _i < arguments.length; _i++) {
                args[_i] = arguments[_i];
            }
            var path = args[0];
            var rv = this._getFs(path);
            args[0] = rv.path;
            if (typeof args[args.length - 1] === 'function') {
                var cb_1 = args[args.length - 1];
                args[args.length - 1] = function () {
                    var args = [];
                    for (var _i = 0; _i < arguments.length; _i++) {
                        args[_i] = arguments[_i];
                    }
                    if (args.length > 0 && args[0] instanceof api_error_1.ApiError) {
                        _this.standardizeError(args[0], rv.path, path);
                    }
                    cb_1.apply(null, args);
                };
            }
            return rv.fs[name].apply(rv.fs, args);
        };
    }
}
/**
 * @hidden
 */
var fsCmdMap = [
    // 1 arg functions
    ['exists', 'unlink', 'readlink'],
    // 2 arg functions
    ['stat', 'mkdir', 'truncate'],
    // 3 arg functions
    ['open', 'readFile', 'chmod', 'utimes'],
    // 4 arg functions
    ['chown'],
    // 5 arg functions
    ['writeFile', 'appendFile']
];
for (var i = 0; i < fsCmdMap.length; i++) {
    var cmds = fsCmdMap[i];
    for (var _i = 0, cmds_1 = cmds; _i < cmds_1.length; _i++) {
        var fnName = cmds_1[_i];
        MountableFileSystem.prototype[fnName] = defineFcn(fnName, false, i + 1);
        MountableFileSystem.prototype[fnName + 'Sync'] = defineFcn(fnName + 'Sync', true, i + 1);
    }
}
//# sourceMappingURL=MountableFileSystem.js.map
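For orientation, the proxied methods generated by the `fsCmdMap` loop above (together with the hand-written `rmdir`/`realpath` variants) are normally exercised through `BrowserFS.configure`, where each option key names a mount point. A minimal sketch follows; the mount paths and nested backends are illustrative placeholders, not part of the generated sources:

```javascript
// Mount two stock backends under different paths; keys are mount points.
BrowserFS.configure({
  fs: "MountableFileSystem",
  options: {
    "/tmp": { fs: "InMemory" },
    "/data": { fs: "IndexedDB" }
  }
}, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire("fs");
  // readdir("/") merges entries from the root FS with the mounted backends,
  // as in MountableFileSystem.prototype.readdir above.
  fs.readdir("/", function (err, entries) { console.log(err, entries); });
  // Removing a directory that still contains a mount point fails with ENOTEMPTY,
  // which is the _containsMountPt check performed by rmdir/rmdirSync.
  fs.rmdir("/", function (err) { console.log(err && err.code); });
});
```

This is only the documented BrowserFS mounting pattern; nothing in the sketch is specific to this vendored copy.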
124
sandpack-generated/static/browserfs11/node/backend/OverlayFS.d.ts
vendored
Normal file
124
sandpack-generated/static/browserfs11/node/backend/OverlayFS.d.ts
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
import { FileSystem, BaseFileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
import LockedFS from '../generic/locked_fs';
|
||||
/**
|
||||
* *INTERNAL, DO NOT USE DIRECTLY!*
|
||||
*
|
||||
* Core OverlayFS class that contains no locking whatsoever. We wrap these objects
|
||||
* in a LockedFS to prevent races.
|
||||
*/
|
||||
export declare class UnlockedOverlayFS extends BaseFileSystem implements FileSystem {
|
||||
static isAvailable(): boolean;
|
||||
private _writable;
|
||||
private _readable;
|
||||
private _isInitialized;
|
||||
private _initializeCallbacks;
|
||||
private _deletedFiles;
|
||||
private _deleteLog;
|
||||
private _deleteLogUpdatePending;
|
||||
private _deleteLogUpdateNeeded;
|
||||
private _deleteLogError;
|
||||
constructor(writable: FileSystem, readable: FileSystem);
|
||||
getOverlayedFileSystems(): {
|
||||
readable: FileSystem;
|
||||
writable: FileSystem;
|
||||
};
|
||||
_syncAsync(file: PreloadFile<UnlockedOverlayFS>, cb: BFSOneArgCallback): void;
|
||||
_syncSync(file: PreloadFile<UnlockedOverlayFS>): void;
|
||||
getName(): string;
|
||||
/**
|
||||
* **INTERNAL METHOD**
|
||||
*
|
||||
* Called once to load up metadata stored on the writable file system.
|
||||
*/
|
||||
_initialize(cb: BFSOneArgCallback): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
getDeletionLog(): string;
|
||||
restoreDeletionLog(log: string): void;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(p: string, isLstat: boolean): Stats;
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(p: string, flag: FileFlag, mode: number): File;
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
unlinkSync(p: string): void;
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdir(p: string, mode: number, cb: BFSCallback<Stats>): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(p: string): string[];
|
||||
exists(p: string, cb: (exists: boolean) => void): void;
|
||||
existsSync(p: string): boolean;
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
chown(p: string, isLchmod: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
private deletePath;
|
||||
private updateLog;
|
||||
private _reparseDeletionLog;
|
||||
private checkInitialized;
|
||||
private checkInitAsync;
|
||||
private checkPath;
|
||||
private checkPathAsync;
|
||||
private createParentDirectoriesAsync;
|
||||
/**
|
||||
* With the given path, create the needed parent directories on the writable storage
|
||||
* should they not exist. Use modes from the read-only storage.
|
||||
*/
|
||||
private createParentDirectories;
|
||||
/**
|
||||
* Helper function:
|
||||
* - Ensures p is on writable before proceeding. Throws an error if it doesn't exist.
|
||||
* - Calls f to perform operation on writable.
|
||||
*/
|
||||
private operateOnWritable;
|
||||
private operateOnWritableAsync;
|
||||
/**
|
||||
* Copy from readable to writable storage.
|
||||
* PRECONDITION: File does not exist on writable storage.
|
||||
*/
|
||||
private copyToWritable;
|
||||
private copyToWritableAsync;
|
||||
}
|
||||
/**
|
||||
* Configuration options for OverlayFS instances.
|
||||
*/
|
||||
export interface OverlayFSOptions {
|
||||
writable: FileSystem;
|
||||
readable: FileSystem;
|
||||
}
|
||||
/**
|
||||
* OverlayFS makes a read-only filesystem writable by storing writes on a second,
|
||||
* writable file system. Deletes are persisted via metadata stored on the writable
|
||||
* file system.
|
||||
*/
|
||||
export default class OverlayFS extends LockedFS<UnlockedOverlayFS> {
|
||||
static readonly Name = "OverlayFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Constructs and initializes an OverlayFS instance with the given options.
|
||||
*/
|
||||
static Create(opts: OverlayFSOptions, cb: BFSCallback<OverlayFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
/**
|
||||
* @param writable The file system to write modified files to.
|
||||
* @param readable The file system that initially populates this file system.
|
||||
*/
|
||||
constructor(writable: FileSystem, readable: FileSystem);
|
||||
getOverlayedFileSystems(): {
|
||||
readable: FileSystem;
|
||||
writable: FileSystem;
|
||||
};
|
||||
unwrap(): UnlockedOverlayFS;
|
||||
private _initialize;
|
||||
}
|
||||
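Since the OverlayFS declaration above documents only the `writable`/`readable` option pair, a hedged configuration sketch may help. It assumes the standard `BrowserFS.configure` entry point resolves the nested backend options; both nested backends are placeholders:

```javascript
// Writes land on the writable store; reads fall through to the read-only store.
// Deletes are tracked via the deletion log on the writable store (see getDeletionLog above).
BrowserFS.configure({
  fs: "OverlayFS",
  options: {
    writable: { fs: "InMemory" },
    readable: { fs: "InMemory" } // placeholder; typically a read-only backend goes here
  }
}, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire("fs");
  fs.writeFile("/note.txt", "stored on the writable layer", function (err) {
    console.log(err);
  });
});
```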
1001
sandpack-generated/static/browserfs11/node/backend/OverlayFS.js
Normal file
1001
sandpack-generated/static/browserfs11/node/backend/OverlayFS.js
Normal file
File diff suppressed because it is too large
114
sandpack-generated/static/browserfs11/node/backend/UNPKGRequest.d.ts
vendored
Normal file
114
sandpack-generated/static/browserfs11/node/backend/UNPKGRequest.d.ts
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
/**
|
||||
* Configuration options for a HTTPRequest file system.
|
||||
*/
|
||||
export interface UNPKGRequestOptions {
|
||||
dependency: string;
|
||||
version: string;
|
||||
preferXHR?: boolean;
|
||||
}
|
||||
export type UNPKGMeta = UNPKGMetaDirectory;
|
||||
export interface UNPKGMetaFile {
|
||||
path: string;
|
||||
type: "file";
|
||||
contentType: string;
|
||||
integrity: string;
|
||||
lastModified: string;
|
||||
size: number;
|
||||
}
|
||||
export interface UNPKGMetaDirectory {
|
||||
path: string;
|
||||
type: "directory";
|
||||
files: Array<UNPKGMetaDirectory | UNPKGMetaFile>;
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
export default class UNPKGRequest extends BaseFileSystem implements FileSystem {
|
||||
private dependency;
|
||||
private version;
|
||||
static readonly Name = "UNPKGRequest";
|
||||
static readonly Options: FileSystemOptions;
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
static Create(opts: UNPKGRequestOptions, cb: BFSCallback<UNPKGRequest>): void;
|
||||
static isAvailable(): boolean;
|
||||
readonly prefixUrl: string;
|
||||
private _index;
|
||||
private _requestFileAsyncInternal;
|
||||
private _requestFileSizeAsyncInternal;
|
||||
private _requestFileSyncInternal;
|
||||
private _requestFileSizeSyncInternal;
|
||||
private constructor();
|
||||
empty(): void;
|
||||
getName(): string;
|
||||
diskSpace(path: string, cb: (total: number, free: number) => void): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
preloadFile(path: string, buffer: Buffer): void;
|
||||
stat(path: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(path: string, isLstat: boolean): Stats;
|
||||
open(path: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(path: string, flags: FileFlag, mode: number): File;
|
||||
readdir(path: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
private _getHTTPPath;
|
||||
/**
|
||||
* Asynchronously download the given file.
|
||||
*/
|
||||
private _requestFileAsync;
|
||||
/**
|
||||
* Synchronously download the given file.
|
||||
*/
|
||||
private _requestFileSync;
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
private _requestFileSizeAsync;
|
||||
private _requestFileSizeSync;
|
||||
}
|
||||
@@ -0,0 +1,397 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var util_1 = require("../core/util");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var xhr_1 = require("../generic/xhr");
|
||||
var fetch_1 = require("../generic/fetch");
|
||||
var file_index_1 = require("../generic/file_index");
|
||||
/**
|
||||
* Try to convert the given buffer into a string, and pass it to the callback.
|
||||
* Optimization that removes the needed try/catch into a helper function, as
|
||||
* this is an uncommon case.
|
||||
* @hidden
|
||||
*/
|
||||
function tryToString(buff, encoding, cb) {
|
||||
try {
|
||||
cb(null, buff.toString(encoding));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
}
|
||||
function syncNotAvailableError() {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP, "Synchronous HTTP download methods are not available in this environment.");
|
||||
}
|
||||
/**
|
||||
* A simple filesystem backed by HTTP downloads. You must create a directory listing using the
|
||||
* `make_http_index` tool provided by BrowserFS.
|
||||
*
|
||||
* If you install BrowserFS globally with `npm i -g browserfs`, you can generate a listing by
|
||||
* running `make_http_index` in your terminal in the directory you would like to index:
|
||||
*
|
||||
* ```
|
||||
* make_http_index > index.json
|
||||
* ```
|
||||
*
|
||||
* Listings objects look like the following:
|
||||
*
|
||||
* ```json
|
||||
* {
|
||||
* "home": {
|
||||
* "jvilk": {
|
||||
* "someFile.txt": null,
|
||||
* "someDir": {
|
||||
* // Empty directory
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* *This example has the folder `/home/jvilk` with subfile `someFile.txt` and subfolder `someDir`.*
|
||||
*/
|
||||
var UNPKGRequest = /** @class */ (function (_super) {
|
||||
__extends(UNPKGRequest, _super);
|
||||
function UNPKGRequest(meta, dependency, version, preferXHR) {
|
||||
if (preferXHR === void 0) { preferXHR = false; }
|
||||
var _this = _super.call(this) || this;
|
||||
_this.dependency = dependency;
|
||||
_this.version = version;
|
||||
_this._index = file_index_1.FileIndex.fromUnpkg(meta);
|
||||
if (fetch_1.fetchIsAvailable && (!preferXHR || !xhr_1.xhrIsAvailable)) {
|
||||
_this._requestFileAsyncInternal = fetch_1.fetchFileAsync;
|
||||
_this._requestFileSizeAsyncInternal = fetch_1.fetchFileSizeAsync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileAsyncInternal = xhr_1.asyncDownloadFile;
|
||||
_this._requestFileSizeAsyncInternal = xhr_1.getFileSizeAsync;
|
||||
}
|
||||
if (xhr_1.xhrIsAvailable) {
|
||||
_this._requestFileSyncInternal = xhr_1.syncDownloadFile;
|
||||
_this._requestFileSizeSyncInternal = xhr_1.getFileSizeSync;
|
||||
}
|
||||
else {
|
||||
_this._requestFileSyncInternal = syncNotAvailableError;
|
||||
_this._requestFileSizeSyncInternal = syncNotAvailableError;
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Construct an HTTPRequest file system backend with the given options.
|
||||
*/
|
||||
UNPKGRequest.Create = function (opts, cb) {
|
||||
var URL = "https://unpkg.com/".concat(opts.dependency, "@").concat(opts.version);
|
||||
(0, xhr_1.asyncDownloadFile)("".concat(URL, "/?meta"), "json", function (e, data) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
cb(null, new UNPKGRequest(data, opts.dependency, opts.version));
|
||||
}
|
||||
});
|
||||
};
|
||||
UNPKGRequest.isAvailable = function () {
|
||||
return xhr_1.xhrIsAvailable || fetch_1.fetchIsAvailable;
|
||||
};
|
||||
UNPKGRequest.prototype.empty = function () {
|
||||
this._index.fileIterator(function (file) {
|
||||
file.fileData = null;
|
||||
});
|
||||
};
|
||||
UNPKGRequest.prototype.getName = function () {
|
||||
return UNPKGRequest.Name;
|
||||
};
|
||||
UNPKGRequest.prototype.diskSpace = function (path, cb) {
|
||||
// Read-only file system. We could calculate the total space, but that's not
|
||||
// important right now.
|
||||
cb(0, 0);
|
||||
};
|
||||
UNPKGRequest.prototype.isReadOnly = function () {
|
||||
return true;
|
||||
};
|
||||
UNPKGRequest.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
UNPKGRequest.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
UNPKGRequest.prototype.supportsSynch = function () {
|
||||
// Synchronous operations are only available via the XHR interface for now.
|
||||
return xhr_1.xhrIsAvailable;
|
||||
};
|
||||
/**
|
||||
* Special HTTPFS function: Preload the given file into the index.
|
||||
* @param [String] path
|
||||
* @param [BrowserFS.Buffer] buffer
|
||||
*/
|
||||
UNPKGRequest.prototype.preloadFile = function (path, buffer) {
|
||||
var inode = this._index.getInode(path);
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats = inode.getData();
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype.stat = function (path, isLstat, cb) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
this._requestFileSizeAsync(path, function (e, size) {
|
||||
if (e) {
|
||||
return cb(e);
|
||||
}
|
||||
stats.size = size;
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, node_fs_stats_1.default.clone(stats));
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
cb(null, stats);
|
||||
}
|
||||
else {
|
||||
cb(api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path));
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype.statSync = function (path, isLstat) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData();
|
||||
// At this point, a non-opened file will still have default stats from the listing.
|
||||
if (stats.size < 0) {
|
||||
stats.size = this._requestFileSizeSync(path);
|
||||
}
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.FileError(api_error_1.ErrorCode.EINVAL, path);
|
||||
}
|
||||
return stats;
|
||||
};
|
||||
UNPKGRequest.prototype.open = function (path, flags, mode, cb) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path));
|
||||
}
|
||||
var self = this;
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
return cb(api_error_1.ApiError.ENOENT(path));
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats_1 = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
return cb(api_error_1.ApiError.EEXIST(path));
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats_1.fileData) {
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), stats_1.fileData));
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
this._requestFileAsync(path, 'buffer', function (err, buffer) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
// we don't initially have file sizes
|
||||
stats_1.size = buffer.length;
|
||||
stats_1.fileData = buffer;
|
||||
return cb(null, new preload_file_1.NoSyncFile(self, path, flags, node_fs_stats_1.default.clone(stats_1), buffer));
|
||||
});
|
||||
break;
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.'));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(api_error_1.ApiError.EISDIR(path));
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype.openSync = function (path, flags, mode) {
|
||||
// INVARIANT: You can't write to files on this file system.
|
||||
if (flags.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
|
||||
}
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var stats = inode.getData();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
throw api_error_1.ApiError.EEXIST(path);
|
||||
case file_flag_1.ActionType.NOP:
|
||||
// Use existing file contents.
|
||||
// XXX: Uh, this maintains the previously-used flag.
|
||||
if (stats.fileData) {
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), stats.fileData);
|
||||
}
|
||||
// @todo be lazier about actually requesting the file
|
||||
var buffer = this._requestFileSync(path, 'buffer');
|
||||
// we don't initially have file sizes
|
||||
stats.size = buffer.length;
|
||||
stats.fileData = buffer;
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, node_fs_stats_1.default.clone(stats), buffer);
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.');
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype.readdir = function (path, cb) {
|
||||
try {
|
||||
cb(null, this.readdirSync(path));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype.readdirSync = function (path) {
|
||||
// Check if it exists.
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
return inode.getListing();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.ENOTDIR(path);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* We have the entire file as a buffer; optimize readFile.
|
||||
*/
|
||||
UNPKGRequest.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err, arg) {
|
||||
fd.close(function (err2) {
|
||||
if (!err) {
|
||||
err = err2;
|
||||
}
|
||||
return oldCb(err, arg);
|
||||
});
|
||||
};
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
cb(err, (0, util_1.copyingSlice)(fdBuff));
|
||||
}
|
||||
else {
|
||||
tryToString(fdBuff, encoding, cb);
|
||||
}
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
UNPKGRequest.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
return (0, util_1.copyingSlice)(fdBuff);
|
||||
}
|
||||
return fdBuff.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
UNPKGRequest.prototype._getHTTPPath = function (filePath) {
|
||||
if (filePath.charAt(0) === '/') {
|
||||
filePath = filePath.slice(1);
|
||||
}
|
||||
return "https://unpkg.com/".concat(this.dependency, "@").concat(this.version, "/").concat(filePath);
|
||||
};
|
||||
UNPKGRequest.prototype._requestFileAsync = function (p, type, cb) {
|
||||
this._requestFileAsyncInternal(this._getHTTPPath(p), type, cb);
|
||||
};
|
||||
UNPKGRequest.prototype._requestFileSync = function (p, type) {
|
||||
return this._requestFileSyncInternal(this._getHTTPPath(p), type);
|
||||
};
|
||||
/**
|
||||
* Only requests the HEAD content, for the file size.
|
||||
*/
|
||||
UNPKGRequest.prototype._requestFileSizeAsync = function (path, cb) {
|
||||
this._requestFileSizeAsyncInternal(this._getHTTPPath(path), cb);
|
||||
};
|
||||
UNPKGRequest.prototype._requestFileSizeSync = function (path) {
|
||||
return this._requestFileSizeSyncInternal(this._getHTTPPath(path));
|
||||
};
|
||||
UNPKGRequest.Name = "UNPKGRequest";
|
||||
UNPKGRequest.Options = {
|
||||
dependency: {
|
||||
type: "string",
|
||||
description: "Name of dependency"
|
||||
},
|
||||
version: {
|
||||
type: "string",
|
||||
description: "Version of dependency, can be semver"
|
||||
},
|
||||
preferXHR: {
|
||||
type: "boolean",
|
||||
optional: true,
|
||||
description: "Whether to prefer XmlHttpRequest or fetch for async operations if both are available. Default: false"
|
||||
}
|
||||
};
|
||||
return UNPKGRequest;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = UNPKGRequest;
|
||||
//# sourceMappingURL=UNPKGRequest.js.map
|
||||
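The `Create` factory above fetches the unpkg `?meta` listing and builds a `FileIndex` from it. A minimal usage sketch follows; how the class is obtained depends on how this generated bundle is loaded (the `require` path below is a placeholder), and the package name and version are arbitrary examples of the documented `dependency`/`version` options:

```javascript
// Placeholder import path; adjust to wherever this bundle is exposed.
var UNPKGRequest = require("./UNPKGRequest").default;

UNPKGRequest.Create({ dependency: "left-pad", version: "1.3.0" }, function (err, unpkgFs) {
  if (err) { throw err; }
  // The directory index was fetched from https://unpkg.com/left-pad@1.3.0/?meta
  // (see UNPKGRequest.Create above); file bodies are downloaded lazily on open/readFile.
  unpkgFs.readdir("/", function (e, entries) {
    console.log(e, entries);
  });
});
```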
25
sandpack-generated/static/browserfs11/node/backend/WebsocketFS.d.ts
vendored
Normal file
25
sandpack-generated/static/browserfs11/node/backend/WebsocketFS.d.ts
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
/// <reference types="node" />
|
||||
import { FileFlag } from "../core/file_flag";
|
||||
import { BFSCallback, FileSystem, FileSystemOptions, SynchronousFileSystem } from "../core/file_system";
|
||||
import Stats from '../core/node_fs_stats';
|
||||
export interface Socket {
|
||||
emit: (data: any, cb: (answer: any) => void) => void;
|
||||
dispose: () => void;
|
||||
}
|
||||
export interface WebsocketFSOptions {
|
||||
socket: Socket;
|
||||
}
|
||||
export default class WebsocketFS extends SynchronousFileSystem implements FileSystem {
|
||||
static readonly Name = "WebsocketFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
static Create(options: WebsocketFSOptions, cb: BFSCallback<WebsocketFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
private socket;
|
||||
constructor(options: WebsocketFSOptions);
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
readFile(fname: string, encoding: string | null, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
stat(p: string, isLstat: boolean | null, cb: BFSCallback<Stats>): void;
|
||||
}
|
||||
@@ -0,0 +1,123 @@
|
||||
"use strict";
|
||||
/* eslint-disable max-classes-per-file */
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
var __assign = (this && this.__assign) || function () {
|
||||
__assign = Object.assign || function(t) {
|
||||
for (var s, i = 1, n = arguments.length; i < n; i++) {
|
||||
s = arguments[i];
|
||||
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
|
||||
t[p] = s[p];
|
||||
}
|
||||
return t;
|
||||
};
|
||||
return __assign.apply(this, arguments);
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var WebsocketFS = /** @class */ (function (_super) {
|
||||
__extends(WebsocketFS, _super);
|
||||
function WebsocketFS(options) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this.socket = options.socket;
|
||||
return _this;
|
||||
}
|
||||
WebsocketFS.Create = function (options, cb) {
|
||||
cb(null, new WebsocketFS(options));
|
||||
};
|
||||
WebsocketFS.isAvailable = function () {
|
||||
return true;
|
||||
};
|
||||
WebsocketFS.prototype.getName = function () {
|
||||
return "WebsocketFS";
|
||||
};
|
||||
WebsocketFS.prototype.isReadOnly = function () {
|
||||
return false;
|
||||
};
|
||||
WebsocketFS.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
WebsocketFS.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
WebsocketFS.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
try {
|
||||
this.socket.emit({
|
||||
method: 'readFile',
|
||||
args: {
|
||||
path: fname,
|
||||
encoding: encoding,
|
||||
flag: flag
|
||||
}
|
||||
}, function (_a) {
|
||||
var error = _a.error, data = _a.data;
|
||||
if (data) {
|
||||
cb(null, Buffer.from(data));
|
||||
}
|
||||
else {
|
||||
cb(error);
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
WebsocketFS.prototype.stat = function (p, isLstat, cb) {
|
||||
try {
|
||||
this.socket.emit({
|
||||
method: 'stat',
|
||||
args: {
|
||||
path: p,
|
||||
isLstat: isLstat
|
||||
}
|
||||
}, function (_a) {
|
||||
var error = _a.error, data = _a.data;
|
||||
if (data) {
|
||||
cb(null, __assign(__assign({}, data), { atime: new Date(data.atime), mtime: new Date(data.mtime), ctime: new Date(data.ctime), birthtime: new Date(data.birthtime) }));
|
||||
}
|
||||
else {
|
||||
cb(error);
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
WebsocketFS.Name = "WebsocketFS";
|
||||
WebsocketFS.Options = {
|
||||
socket: {
|
||||
type: "object",
|
||||
description: "The socket emitter",
|
||||
validator: function (opt, cb) {
|
||||
if (opt) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Manager is invalid"));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
return WebsocketFS;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = WebsocketFS;
|
||||
/*
|
||||
this.statSync(p, isLstat || true)
|
||||
*/
|
||||
//# sourceMappingURL=WebsocketFS.js.map
|
||||
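WebsocketFS only relies on the `{ emit, dispose }` socket shape from the declaration above, and its `readFile`/`stat` implementations expect each answer to be `{ error, data }`. A sketch of a hypothetical adapter; the transport object and its `send`/`close` methods are stand-ins, not part of the generated code:

```javascript
// Hypothetical transport wrapper; only the { emit, dispose } contract matters to WebsocketFS.
function makeSocket(channel) {
  return {
    // message is e.g. { method: "readFile", args: { path, encoding, flag } }.
    emit: function (message, cb) {
      channel.send(message, function (answer) {
        cb(answer); // expected shape: { error, data }
      });
    },
    dispose: function () {
      channel.close();
    }
  };
}

// someChannel is a placeholder for the host-provided connection.
WebsocketFS.Create({ socket: makeSocket(someChannel) }, function (err, wsfs) {
  if (err) { throw err; }
  // wsfs can now be passed to BrowserFS.initialize or mounted under a MountableFileSystem.
});
```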
92
sandpack-generated/static/browserfs11/node/backend/WorkerFS.d.ts
vendored
Normal file
92
sandpack-generated/static/browserfs11/node/backend/WorkerFS.d.ts
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
import { BaseFileSystem, FileSystem, BFSOneArgCallback, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { File } from '../core/file';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
export interface WorkerFSOptions {
|
||||
worker: Worker;
|
||||
}
|
||||
/**
|
||||
* WorkerFS lets you access a BrowserFS instance that is running in a different
|
||||
* JavaScript context (e.g. access BrowserFS in one of your WebWorkers, or
|
||||
* access BrowserFS running on the main page from a WebWorker).
|
||||
*
|
||||
* For example, to have a WebWorker access files in the main browser thread,
|
||||
* do the following:
|
||||
*
|
||||
* MAIN BROWSER THREAD:
|
||||
*
|
||||
* ```javascript
|
||||
* // Listen for remote file system requests.
|
||||
* BrowserFS.FileSystem.WorkerFS.attachRemoteListener(webWorkerObject);
|
||||
* ```
|
||||
*
|
||||
* WEBWORKER THREAD:
|
||||
*
|
||||
* ```javascript
|
||||
* // Set the remote file system as the root file system.
|
||||
* BrowserFS.configure({ fs: "WorkerFS", options: { worker: self }}, function(e) {
|
||||
* // Ready!
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Note that synchronous operations are not permitted on the WorkerFS, regardless
|
||||
* of the configuration option of the remote FS.
|
||||
*/
|
||||
export default class WorkerFS extends BaseFileSystem implements FileSystem {
|
||||
static readonly Name = "WorkerFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
static Create(opts: WorkerFSOptions, cb: BFSCallback<WorkerFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
/**
|
||||
* Attaches a listener to the remote worker for file system requests.
|
||||
*/
|
||||
static attachRemoteListener(worker: Worker): void;
|
||||
private _worker;
|
||||
private _callbackConverter;
|
||||
private _isInitialized;
|
||||
private _isReadOnly;
|
||||
private _supportLinks;
|
||||
private _supportProps;
|
||||
/**
|
||||
* Constructs a new WorkerFS instance that connects with BrowserFS running on
|
||||
* the specified worker.
|
||||
*/
|
||||
private constructor();
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
unlink(p: string, cb: Function): void;
|
||||
rmdir(p: string, cb: Function): void;
|
||||
mkdir(p: string, mode: number, cb: Function): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
exists(p: string, cb: (exists: boolean) => void): void;
|
||||
realpath(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}, cb: BFSCallback<string>): void;
|
||||
truncate(p: string, len: number, cb: Function): void;
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<any>): void;
|
||||
writeFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
appendFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: Function): void;
|
||||
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: Function): void;
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: Function): void;
|
||||
link(srcpath: string, dstpath: string, cb: Function): void;
|
||||
symlink(srcpath: string, dstpath: string, type: string, cb: Function): void;
|
||||
readlink(p: string, cb: Function): void;
|
||||
syncClose(method: string, fd: File, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Called once both local and remote sides are set up.
|
||||
*/
|
||||
private _initialize;
|
||||
private _argRemote2Local;
|
||||
private _rpc;
|
||||
/**
|
||||
* Converts a local argument into a remote argument. Public so WorkerFile objects can call it.
|
||||
*/
|
||||
private _argLocal2Remote;
|
||||
}
|
||||
770
sandpack-generated/static/browserfs11/node/backend/WorkerFS.js
Normal file
770
sandpack-generated/static/browserfs11/node/backend/WorkerFS.js
Normal file
@@ -0,0 +1,770 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var util_1 = require("../core/util");
|
||||
var file_1 = require("../core/file");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var global_1 = require("../core/global");
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var SpecialArgType;
|
||||
(function (SpecialArgType) {
|
||||
// Callback
|
||||
SpecialArgType[SpecialArgType["CB"] = 0] = "CB";
|
||||
// File descriptor
|
||||
SpecialArgType[SpecialArgType["FD"] = 1] = "FD";
|
||||
// API error
|
||||
SpecialArgType[SpecialArgType["API_ERROR"] = 2] = "API_ERROR";
|
||||
// Stats object
|
||||
SpecialArgType[SpecialArgType["STATS"] = 3] = "STATS";
|
||||
// Initial probe for file system information.
|
||||
SpecialArgType[SpecialArgType["PROBE"] = 4] = "PROBE";
|
||||
// FileFlag object.
|
||||
SpecialArgType[SpecialArgType["FILEFLAG"] = 5] = "FILEFLAG";
|
||||
// Buffer object.
|
||||
SpecialArgType[SpecialArgType["BUFFER"] = 6] = "BUFFER";
|
||||
// Generic Error object.
|
||||
SpecialArgType[SpecialArgType["ERROR"] = 7] = "ERROR";
|
||||
})(SpecialArgType || (SpecialArgType = {}));
|
||||
/**
|
||||
* Converts callback arguments into ICallbackArgument objects, and back
|
||||
* again.
|
||||
* @hidden
|
||||
*/
|
||||
var CallbackArgumentConverter = /** @class */ (function () {
|
||||
function CallbackArgumentConverter() {
|
||||
this._callbacks = {};
|
||||
this._nextId = 0;
|
||||
}
|
||||
CallbackArgumentConverter.prototype.toRemoteArg = function (cb) {
|
||||
var id = this._nextId++;
|
||||
this._callbacks[id] = cb;
|
||||
return {
|
||||
type: SpecialArgType.CB,
|
||||
id: id
|
||||
};
|
||||
};
|
||||
CallbackArgumentConverter.prototype.toLocalArg = function (id) {
|
||||
var cb = this._callbacks[id];
|
||||
delete this._callbacks[id];
|
||||
return cb;
|
||||
};
|
||||
return CallbackArgumentConverter;
|
||||
}());
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var FileDescriptorArgumentConverter = /** @class */ (function () {
|
||||
function FileDescriptorArgumentConverter() {
|
||||
this._fileDescriptors = {};
|
||||
this._nextId = 0;
|
||||
}
|
||||
FileDescriptorArgumentConverter.prototype.toRemoteArg = function (fd, p, flag, cb) {
|
||||
var id = this._nextId++;
|
||||
var data;
|
||||
var stat;
|
||||
this._fileDescriptors[id] = fd;
|
||||
// Extract needed information asynchronously.
|
||||
fd.stat(function (err, stats) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else {
|
||||
stat = bufferToTransferrableObject(stats.toBuffer());
|
||||
// If it's a readable flag, we need to grab contents.
|
||||
if (flag.isReadable()) {
|
||||
fd.read(Buffer.alloc(stats.size), 0, stats.size, 0, function (err, bytesRead, buff) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else {
|
||||
data = bufferToTransferrableObject(buff);
|
||||
cb(null, {
|
||||
type: SpecialArgType.FD,
|
||||
id: id,
|
||||
data: data,
|
||||
stat: stat,
|
||||
path: p,
|
||||
flag: flag.getFlagString()
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
// File is not readable, which means writing to it will append or
|
||||
// truncate/replace existing contents. Return an empty arraybuffer.
|
||||
cb(null, {
|
||||
type: SpecialArgType.FD,
|
||||
id: id,
|
||||
data: new ArrayBuffer(0),
|
||||
stat: stat,
|
||||
path: p,
|
||||
flag: flag.getFlagString()
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
FileDescriptorArgumentConverter.prototype.applyFdAPIRequest = function (request, cb) {
|
||||
var _this = this;
|
||||
var fdArg = request.args[0];
|
||||
this._applyFdChanges(fdArg, function (err, fd) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
else {
|
||||
// Apply method on now-changed file descriptor.
|
||||
fd[request.method](function (e) {
|
||||
if (request.method === 'close') {
|
||||
delete _this._fileDescriptors[fdArg.id];
|
||||
}
|
||||
cb(e);
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
FileDescriptorArgumentConverter.prototype._applyFdChanges = function (remoteFd, cb) {
|
||||
var fd = this._fileDescriptors[remoteFd.id], data = transferrableObjectToBuffer(remoteFd.data), remoteStats = node_fs_stats_1.default.fromBuffer(transferrableObjectToBuffer(remoteFd.stat));
|
||||
// Write data if the file is writable.
|
||||
var flag = file_flag_1.FileFlag.getFileFlag(remoteFd.flag);
|
||||
if (flag.isWriteable()) {
|
||||
// Appendable: Write to end of file.
|
||||
// Writeable: Replace entire contents of file.
|
||||
fd.write(data, 0, data.length, flag.isAppendable() ? fd.getPos() : 0, function (e) {
|
||||
function applyStatChanges() {
|
||||
// Check if mode changed.
|
||||
fd.stat(function (e, stats) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
if (stats.mode !== remoteStats.mode) {
|
||||
fd.chmod(remoteStats.mode, function (e) {
|
||||
cb(e, fd);
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(e, fd);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
// If writeable & not appendable, we need to ensure file contents are
|
||||
// identical to those from the remote FD. Thus, we truncate to the
|
||||
// length of the remote file.
|
||||
if (!flag.isAppendable()) {
|
||||
fd.truncate(data.length, function () {
|
||||
applyStatChanges();
|
||||
});
|
||||
}
|
||||
else {
|
||||
applyStatChanges();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, fd);
|
||||
}
|
||||
};
|
||||
return FileDescriptorArgumentConverter;
|
||||
}());
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function apiErrorLocal2Remote(e) {
|
||||
return {
|
||||
type: SpecialArgType.API_ERROR,
|
||||
errorData: bufferToTransferrableObject(e.writeToBuffer())
|
||||
};
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function apiErrorRemote2Local(e) {
|
||||
return api_error_1.ApiError.fromBuffer(transferrableObjectToBuffer(e.errorData));
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function errorLocal2Remote(e) {
|
||||
return {
|
||||
type: SpecialArgType.ERROR,
|
||||
name: e.name,
|
||||
message: e.message,
|
||||
stack: e.stack
|
||||
};
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function errorRemote2Local(e) {
|
||||
var cnstr = global_1.default[e.name];
|
||||
if (typeof (cnstr) !== 'function') {
|
||||
cnstr = Error;
|
||||
}
|
||||
var err = new cnstr(e.message);
|
||||
err.stack = e.stack;
|
||||
return err;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function statsLocal2Remote(stats) {
|
||||
return {
|
||||
type: SpecialArgType.STATS,
|
||||
statsData: bufferToTransferrableObject(stats.toBuffer())
|
||||
};
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function statsRemote2Local(stats) {
|
||||
return node_fs_stats_1.default.fromBuffer(transferrableObjectToBuffer(stats.statsData));
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function fileFlagLocal2Remote(flag) {
|
||||
return {
|
||||
type: SpecialArgType.FILEFLAG,
|
||||
flagStr: flag.getFlagString()
|
||||
};
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function fileFlagRemote2Local(remoteFlag) {
|
||||
return file_flag_1.FileFlag.getFileFlag(remoteFlag.flagStr);
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function bufferToTransferrableObject(buff) {
|
||||
return (0, util_1.buffer2ArrayBuffer)(buff);
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function transferrableObjectToBuffer(buff) {
|
||||
return (0, util_1.arrayBuffer2Buffer)(buff);
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function bufferLocal2Remote(buff) {
|
||||
return {
|
||||
type: SpecialArgType.BUFFER,
|
||||
data: bufferToTransferrableObject(buff)
|
||||
};
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function bufferRemote2Local(buffArg) {
|
||||
return transferrableObjectToBuffer(buffArg.data);
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isAPIRequest(data) {
|
||||
return data && typeof data === 'object' && data.hasOwnProperty('browserfsMessage') && data['browserfsMessage'];
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isAPIResponse(data) {
|
||||
return data && typeof data === 'object' && data.hasOwnProperty('browserfsMessage') && data['browserfsMessage'];
|
||||
}
|
||||
/**
|
||||
* Represents a remote file in a different worker/thread.
|
||||
*/
|
||||
var WorkerFile = /** @class */ (function (_super) {
|
||||
__extends(WorkerFile, _super);
|
||||
function WorkerFile(_fs, _path, _flag, _stat, remoteFdId, contents) {
|
||||
var _this = _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
_this._remoteFdId = remoteFdId;
|
||||
return _this;
|
||||
}
|
||||
WorkerFile.prototype.getRemoteFdId = function () {
|
||||
return this._remoteFdId;
|
||||
};
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
WorkerFile.prototype.toRemoteArg = function () {
|
||||
return {
|
||||
type: SpecialArgType.FD,
|
||||
id: this._remoteFdId,
|
||||
data: bufferToTransferrableObject(this.getBuffer()),
|
||||
stat: bufferToTransferrableObject(this.getStats().toBuffer()),
|
||||
path: this.getPath(),
|
||||
flag: this.getFlag().getFlagString()
|
||||
};
|
||||
};
|
||||
WorkerFile.prototype.sync = function (cb) {
|
||||
this._syncClose('sync', cb);
|
||||
};
|
||||
WorkerFile.prototype.close = function (cb) {
|
||||
this._syncClose('close', cb);
|
||||
};
|
||||
WorkerFile.prototype._syncClose = function (type, cb) {
|
||||
var _this = this;
|
||||
if (this.isDirty()) {
|
||||
this._fs.syncClose(type, this, function (e) {
|
||||
if (!e) {
|
||||
_this.resetDirty();
|
||||
}
|
||||
cb(e);
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
};
|
||||
return WorkerFile;
|
||||
}(preload_file_1.default));
|
||||
/**
|
||||
* WorkerFS lets you access a BrowserFS instance that is running in a different
|
||||
* JavaScript context (e.g. access BrowserFS in one of your WebWorkers, or
|
||||
* access BrowserFS running on the main page from a WebWorker).
|
||||
*
|
||||
* For example, to have a WebWorker access files in the main browser thread,
|
||||
* do the following:
|
||||
*
|
||||
* MAIN BROWSER THREAD:
|
||||
*
|
||||
* ```javascript
|
||||
* // Listen for remote file system requests.
|
||||
* BrowserFS.FileSystem.WorkerFS.attachRemoteListener(webWorkerObject);
|
||||
* ```
|
||||
*
|
||||
* WEBWORKER THREAD:
|
||||
*
|
||||
* ```javascript
|
||||
* // Set the remote file system as the root file system.
|
||||
* BrowserFS.configure({ fs: "WorkerFS", options: { worker: self }}, function(e) {
|
||||
* // Ready!
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Note that synchronous operations are not permitted on the WorkerFS, regardless
|
||||
* of the configuration option of the remote FS.
|
||||
*/
|
||||
var WorkerFS = /** @class */ (function (_super) {
|
||||
__extends(WorkerFS, _super);
|
||||
/**
|
||||
* Constructs a new WorkerFS instance that connects with BrowserFS running on
|
||||
* the specified worker.
|
||||
*/
|
||||
function WorkerFS(worker) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._callbackConverter = new CallbackArgumentConverter();
|
||||
_this._isInitialized = false;
|
||||
_this._isReadOnly = false;
|
||||
_this._supportLinks = false;
|
||||
_this._supportProps = false;
|
||||
_this._worker = worker;
|
||||
_this._worker.addEventListener('message', function (e) {
|
||||
var resp = e.data;
|
||||
if (isAPIResponse(resp)) {
|
||||
var i = void 0;
|
||||
var args = resp.args;
|
||||
var fixedArgs = new Array(args.length);
|
||||
// Dispatch event to correct id.
|
||||
for (i = 0; i < fixedArgs.length; i++) {
|
||||
fixedArgs[i] = _this._argRemote2Local(args[i]);
|
||||
}
|
||||
_this._callbackConverter.toLocalArg(resp.cbId).apply(null, fixedArgs);
|
||||
}
|
||||
});
|
||||
return _this;
|
||||
}
|
||||
WorkerFS.Create = function (opts, cb) {
|
||||
var fs = new WorkerFS(opts.worker);
|
||||
fs._initialize(function () {
|
||||
cb(null, fs);
|
||||
});
|
||||
};
|
||||
WorkerFS.isAvailable = function () {
|
||||
return typeof (importScripts) !== 'undefined' || typeof (Worker) !== 'undefined';
|
||||
};
|
||||
/**
|
||||
* Attaches a listener to the remote worker for file system requests.
|
||||
*/
|
||||
WorkerFS.attachRemoteListener = function (worker) {
|
||||
var fdConverter = new FileDescriptorArgumentConverter();
|
||||
function argLocal2Remote(arg, requestArgs, cb) {
|
||||
switch (typeof arg) {
|
||||
case 'object':
|
||||
if (arg instanceof node_fs_stats_1.default) {
|
||||
cb(null, statsLocal2Remote(arg));
|
||||
}
|
||||
else if (arg instanceof api_error_1.ApiError) {
|
||||
cb(null, apiErrorLocal2Remote(arg));
|
||||
}
|
||||
else if (arg instanceof file_1.BaseFile) {
|
||||
// Pass in p and flags from original request.
|
||||
cb(null, fdConverter.toRemoteArg(arg, requestArgs[0], requestArgs[1], cb));
|
||||
}
|
||||
else if (arg instanceof file_flag_1.FileFlag) {
|
||||
cb(null, fileFlagLocal2Remote(arg));
|
||||
}
|
||||
else if (arg instanceof Buffer) {
|
||||
cb(null, bufferLocal2Remote(arg));
|
||||
}
|
||||
else if (arg instanceof Error) {
|
||||
cb(null, errorLocal2Remote(arg));
|
||||
}
|
||||
else {
|
||||
cb(null, arg);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
cb(null, arg);
|
||||
break;
|
||||
}
|
||||
}
|
||||
function argRemote2Local(arg, fixedRequestArgs) {
|
||||
if (!arg) {
|
||||
return arg;
|
||||
}
|
||||
switch (typeof arg) {
|
||||
case 'object':
|
||||
if (typeof arg['type'] === 'number') {
|
||||
var specialArg = arg;
|
||||
switch (specialArg.type) {
|
||||
case SpecialArgType.CB:
|
||||
var cbId_1 = arg.id;
|
||||
return function () {
|
||||
var i;
|
||||
var fixedArgs = new Array(arguments.length);
|
||||
var message, countdown = arguments.length;
|
||||
function abortAndSendError(err) {
|
||||
if (countdown > 0) {
|
||||
countdown = -1;
|
||||
message = {
|
||||
browserfsMessage: true,
|
||||
cbId: cbId_1,
|
||||
args: [apiErrorLocal2Remote(err)]
|
||||
};
|
||||
worker.postMessage(message);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < arguments.length; i++) {
|
||||
// Capture i and argument.
|
||||
(function (i, arg) {
|
||||
argLocal2Remote(arg, fixedRequestArgs, function (err, fixedArg) {
|
||||
fixedArgs[i] = fixedArg;
|
||||
if (err) {
|
||||
abortAndSendError(err);
|
||||
}
|
||||
else if (--countdown === 0) {
|
||||
message = {
|
||||
browserfsMessage: true,
|
||||
cbId: cbId_1,
|
||||
args: fixedArgs
|
||||
};
|
||||
worker.postMessage(message);
|
||||
}
|
||||
});
|
||||
})(i, arguments[i]);
|
||||
}
|
||||
if (arguments.length === 0) {
|
||||
message = {
|
||||
browserfsMessage: true,
|
||||
cbId: cbId_1,
|
||||
args: fixedArgs
|
||||
};
|
||||
worker.postMessage(message);
|
||||
}
|
||||
};
|
||||
case SpecialArgType.API_ERROR:
|
||||
return apiErrorRemote2Local(specialArg);
|
||||
case SpecialArgType.STATS:
|
||||
return statsRemote2Local(specialArg);
|
||||
case SpecialArgType.FILEFLAG:
|
||||
return fileFlagRemote2Local(specialArg);
|
||||
case SpecialArgType.BUFFER:
|
||||
return bufferRemote2Local(specialArg);
|
||||
case SpecialArgType.ERROR:
|
||||
return errorRemote2Local(specialArg);
|
||||
default:
|
||||
// No idea what this is.
|
||||
return arg;
|
||||
}
|
||||
}
|
||||
else {
|
||||
return arg;
|
||||
}
|
||||
default:
|
||||
return arg;
|
||||
}
|
||||
}
|
||||
worker.addEventListener('message', function (e) {
|
||||
var request = e.data;
|
||||
if (isAPIRequest(request)) {
|
||||
var args_1 = request.args, fixedArgs = new Array(args_1.length);
|
||||
switch (request.method) {
|
||||
case 'close':
|
||||
case 'sync':
|
||||
(function () {
|
||||
// File descriptor-relative methods.
|
||||
var remoteCb = args_1[1];
|
||||
fdConverter.applyFdAPIRequest(request, function (err) {
|
||||
// Send response.
|
||||
var response = {
|
||||
browserfsMessage: true,
|
||||
cbId: remoteCb.id,
|
||||
args: err ? [apiErrorLocal2Remote(err)] : []
|
||||
};
|
||||
worker.postMessage(response);
|
||||
});
|
||||
})();
|
||||
break;
|
||||
case 'probe':
|
||||
(function () {
|
||||
var rootFs = node_fs_1.default.getRootFS(), remoteCb = args_1[1], probeResponse = {
|
||||
type: SpecialArgType.PROBE,
|
||||
isReadOnly: rootFs.isReadOnly(),
|
||||
supportsLinks: rootFs.supportsLinks(),
|
||||
supportsProps: rootFs.supportsProps()
|
||||
}, response = {
|
||||
browserfsMessage: true,
|
||||
cbId: remoteCb.id,
|
||||
args: [probeResponse]
|
||||
};
|
||||
worker.postMessage(response);
|
||||
})();
|
||||
break;
|
||||
default:
|
||||
// File system methods.
|
||||
for (var i = 0; i < args_1.length; i++) {
|
||||
fixedArgs[i] = argRemote2Local(args_1[i], fixedArgs);
|
||||
}
|
||||
var rootFS = node_fs_1.default.getRootFS();
|
||||
rootFS[request.method].apply(rootFS, fixedArgs);
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
WorkerFS.prototype.getName = function () {
|
||||
return WorkerFS.Name;
|
||||
};
|
||||
WorkerFS.prototype.isReadOnly = function () { return this._isReadOnly; };
|
||||
WorkerFS.prototype.supportsSynch = function () { return false; };
|
||||
WorkerFS.prototype.supportsLinks = function () { return this._supportLinks; };
|
||||
WorkerFS.prototype.supportsProps = function () { return this._supportProps; };
|
||||
WorkerFS.prototype.rename = function (oldPath, newPath, cb) {
|
||||
this._rpc('rename', arguments);
|
||||
};
|
||||
WorkerFS.prototype.stat = function (p, isLstat, cb) {
|
||||
this._rpc('stat', arguments);
|
||||
};
|
||||
WorkerFS.prototype.open = function (p, flag, mode, cb) {
|
||||
this._rpc('open', arguments);
|
||||
};
|
||||
WorkerFS.prototype.unlink = function (p, cb) {
|
||||
this._rpc('unlink', arguments);
|
||||
};
|
||||
WorkerFS.prototype.rmdir = function (p, cb) {
|
||||
this._rpc('rmdir', arguments);
|
||||
};
|
||||
WorkerFS.prototype.mkdir = function (p, mode, cb) {
|
||||
this._rpc('mkdir', arguments);
|
||||
};
|
||||
WorkerFS.prototype.readdir = function (p, cb) {
|
||||
this._rpc('readdir', arguments);
|
||||
};
|
||||
WorkerFS.prototype.exists = function (p, cb) {
|
||||
this._rpc('exists', arguments);
|
||||
};
|
||||
WorkerFS.prototype.realpath = function (p, cache, cb) {
|
||||
this._rpc('realpath', arguments);
|
||||
};
|
||||
WorkerFS.prototype.truncate = function (p, len, cb) {
|
||||
this._rpc('truncate', arguments);
|
||||
};
|
||||
WorkerFS.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
this._rpc('readFile', arguments);
|
||||
};
|
||||
WorkerFS.prototype.writeFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
this._rpc('writeFile', arguments);
|
||||
};
|
||||
WorkerFS.prototype.appendFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
this._rpc('appendFile', arguments);
|
||||
};
|
||||
WorkerFS.prototype.chmod = function (p, isLchmod, mode, cb) {
|
||||
this._rpc('chmod', arguments);
|
||||
};
|
||||
WorkerFS.prototype.chown = function (p, isLchown, uid, gid, cb) {
|
||||
this._rpc('chown', arguments);
|
||||
};
|
||||
WorkerFS.prototype.utimes = function (p, atime, mtime, cb) {
|
||||
this._rpc('utimes', arguments);
|
||||
};
|
||||
WorkerFS.prototype.link = function (srcpath, dstpath, cb) {
|
||||
this._rpc('link', arguments);
|
||||
};
|
||||
WorkerFS.prototype.symlink = function (srcpath, dstpath, type, cb) {
|
||||
this._rpc('symlink', arguments);
|
||||
};
|
||||
WorkerFS.prototype.readlink = function (p, cb) {
|
||||
this._rpc('readlink', arguments);
|
||||
};
|
||||
WorkerFS.prototype.syncClose = function (method, fd, cb) {
|
||||
this._worker.postMessage({
|
||||
browserfsMessage: true,
|
||||
method: method,
|
||||
args: [fd.toRemoteArg(), this._callbackConverter.toRemoteArg(cb)]
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Called once both local and remote sides are set up.
|
||||
*/
|
||||
WorkerFS.prototype._initialize = function (cb) {
|
||||
var _this = this;
|
||||
if (!this._isInitialized) {
|
||||
var message = {
|
||||
browserfsMessage: true,
|
||||
method: 'probe',
|
||||
args: [this._argLocal2Remote((0, util_1.emptyBuffer)()), this._callbackConverter.toRemoteArg(function (probeResponse) {
|
||||
_this._isInitialized = true;
|
||||
_this._isReadOnly = probeResponse.isReadOnly;
|
||||
_this._supportLinks = probeResponse.supportsLinks;
|
||||
_this._supportProps = probeResponse.supportsProps;
|
||||
cb();
|
||||
})]
|
||||
};
|
||||
this._worker.postMessage(message);
|
||||
}
|
||||
else {
|
||||
cb();
|
||||
}
|
||||
};
|
||||
WorkerFS.prototype._argRemote2Local = function (arg) {
|
||||
if (!arg) {
|
||||
return arg;
|
||||
}
|
||||
switch (typeof arg) {
|
||||
case 'object':
|
||||
if (typeof arg['type'] === 'number') {
|
||||
var specialArg = arg;
|
||||
switch (specialArg.type) {
|
||||
case SpecialArgType.API_ERROR:
|
||||
return apiErrorRemote2Local(specialArg);
|
||||
case SpecialArgType.FD:
|
||||
var fdArg = specialArg;
|
||||
return new WorkerFile(this, fdArg.path, file_flag_1.FileFlag.getFileFlag(fdArg.flag), node_fs_stats_1.default.fromBuffer(transferrableObjectToBuffer(fdArg.stat)), fdArg.id, transferrableObjectToBuffer(fdArg.data));
|
||||
case SpecialArgType.STATS:
|
||||
return statsRemote2Local(specialArg);
|
||||
case SpecialArgType.FILEFLAG:
|
||||
return fileFlagRemote2Local(specialArg);
|
||||
case SpecialArgType.BUFFER:
|
||||
return bufferRemote2Local(specialArg);
|
||||
case SpecialArgType.ERROR:
|
||||
return errorRemote2Local(specialArg);
|
||||
default:
|
||||
return arg;
|
||||
}
|
||||
}
|
||||
else {
|
||||
return arg;
|
||||
}
|
||||
default:
|
||||
return arg;
|
||||
}
|
||||
};
|
||||
WorkerFS.prototype._rpc = function (methodName, args) {
|
||||
var fixedArgs = new Array(args.length);
|
||||
for (var i = 0; i < args.length; i++) {
|
||||
fixedArgs[i] = this._argLocal2Remote(args[i]);
|
||||
}
|
||||
var message = {
|
||||
browserfsMessage: true,
|
||||
method: methodName,
|
||||
args: fixedArgs
|
||||
};
|
||||
this._worker.postMessage(message);
|
||||
};
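// A minimal sketch of the RPC envelope produced above (the method name and args shown
// are illustrative only; the field names come from the code):
//
//   {
//     browserfsMessage: true,   // marks the message as BrowserFS traffic
//     method: 'readFile',       // FileSystem method to invoke on the remote side
//     args: [/* marshalled arguments; callbacks become ids via _callbackConverter */]
//   }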
|
||||
/**
|
||||
* Converts a local argument into a remote argument. Public so WorkerFile objects can call it.
|
||||
*/
|
||||
WorkerFS.prototype._argLocal2Remote = function (arg) {
|
||||
if (!arg) {
|
||||
return arg;
|
||||
}
|
||||
switch (typeof arg) {
|
||||
case "object":
|
||||
if (arg instanceof node_fs_stats_1.default) {
|
||||
return statsLocal2Remote(arg);
|
||||
}
|
||||
else if (arg instanceof api_error_1.ApiError) {
|
||||
return apiErrorLocal2Remote(arg);
|
||||
}
|
||||
else if (arg instanceof WorkerFile) {
|
||||
return arg.toRemoteArg();
|
||||
}
|
||||
else if (arg instanceof file_flag_1.FileFlag) {
|
||||
return fileFlagLocal2Remote(arg);
|
||||
}
|
||||
else if (arg instanceof Buffer) {
|
||||
return bufferLocal2Remote(arg);
|
||||
}
|
||||
else if (arg instanceof Error) {
|
||||
return errorLocal2Remote(arg);
|
||||
}
|
||||
else {
|
||||
return "Unknown argument";
|
||||
}
|
||||
case "function":
|
||||
return this._callbackConverter.toRemoteArg(arg);
|
||||
default:
|
||||
return arg;
|
||||
}
|
||||
};
|
||||
WorkerFS.Name = "WorkerFS";
|
||||
WorkerFS.Options = {
|
||||
worker: {
|
||||
type: "object",
|
||||
description: "The target worker that you want to connect to, or the current worker if in a worker context.",
|
||||
validator: function (v, cb) {
|
||||
// Check for a `postMessage` function.
|
||||
if (v['postMessage']) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "option must be a Web Worker instance."));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
return WorkerFS;
|
||||
}(file_system_1.BaseFileSystem));
|
||||
exports.default = WorkerFS;
|
||||
//# sourceMappingURL=WorkerFS.js.map
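// Usage sketch (not part of the generated file): mounting a WorkerFS on the main thread.
// This assumes the worker script has already set up the BrowserFS worker-side listener;
// `worker` is a hypothetical variable.
var worker = new Worker("bfs-worker.js");
BrowserFS.configure({
    fs: "WorkerFS",
    options: { worker: worker }
}, function (e) {
    if (e) { throw e; }
    // All fs calls are now forwarded to the worker via the postMessage RPC above.
});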
|
||||
375
sandpack-generated/static/browserfs11/node/backend/ZipFS.d.ts
vendored
Normal file
@@ -0,0 +1,375 @@
|
||||
/// <reference types="node" />
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { SynchronousFileSystem, FileSystem, BFSCallback, FileSystemOptions } from '../core/file_system';
|
||||
import { File } from '../core/file';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { FileIndex } from '../generic/file_index';
|
||||
/**
|
||||
* 4.4.2.2: Indicates the compatibility of a file's external attributes.
|
||||
*/
|
||||
export declare enum ExternalFileAttributeType {
|
||||
MSDOS = 0,
|
||||
AMIGA = 1,
|
||||
OPENVMS = 2,
|
||||
UNIX = 3,
|
||||
VM_CMS = 4,
|
||||
ATARI_ST = 5,
|
||||
OS2_HPFS = 6,
|
||||
MAC = 7,
|
||||
Z_SYSTEM = 8,
|
||||
CP_M = 9,
|
||||
NTFS = 10,
|
||||
MVS = 11,
|
||||
VSE = 12,
|
||||
ACORN_RISC = 13,
|
||||
VFAT = 14,
|
||||
ALT_MVS = 15,
|
||||
BEOS = 16,
|
||||
TANDEM = 17,
|
||||
OS_400 = 18,
|
||||
OSX = 19
|
||||
}
|
||||
/**
|
||||
* 4.4.5
|
||||
*/
|
||||
export declare enum CompressionMethod {
|
||||
STORED = 0,
|
||||
SHRUNK = 1,
|
||||
REDUCED_1 = 2,
|
||||
REDUCED_2 = 3,
|
||||
REDUCED_3 = 4,
|
||||
REDUCED_4 = 5,
|
||||
IMPLODE = 6,
|
||||
DEFLATE = 8,
|
||||
DEFLATE64 = 9,
|
||||
TERSE_OLD = 10,
|
||||
BZIP2 = 12,
|
||||
LZMA = 14,
|
||||
TERSE_NEW = 18,
|
||||
LZ77 = 19,
|
||||
WAVPACK = 97,
|
||||
PPMD = 98
|
||||
}
|
||||
/**
|
||||
* 4.3.7 Local file header:
|
||||
*
|
||||
* local file header signature 4 bytes (0x04034b50)
|
||||
* version needed to extract 2 bytes
|
||||
* general purpose bit flag 2 bytes
|
||||
* compression method 2 bytes
|
||||
* last mod file time 2 bytes
|
||||
* last mod file date 2 bytes
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
* file name length 2 bytes
|
||||
* extra field length 2 bytes
|
||||
*
|
||||
* file name (variable size)
|
||||
* extra field (variable size)
|
||||
*/
|
||||
export declare class FileHeader {
|
||||
private data;
|
||||
constructor(data: Buffer);
|
||||
versionNeeded(): number;
|
||||
flags(): number;
|
||||
compressionMethod(): CompressionMethod;
|
||||
lastModFileTime(): Date;
|
||||
rawLastModFileTime(): number;
|
||||
crc32(): number;
|
||||
/**
|
||||
* These two values are COMPLETELY USELESS.
|
||||
*
|
||||
* Section 4.4.9:
|
||||
* If bit 3 of the general purpose bit flag is set,
|
||||
* these fields are set to zero in the local header and the
|
||||
* correct values are put in the data descriptor and
|
||||
* in the central directory.
|
||||
*
|
||||
* So we'll just use the central directory's values.
|
||||
*/
|
||||
fileNameLength(): number;
|
||||
extraFieldLength(): number;
|
||||
fileName(): string;
|
||||
extraField(): Buffer;
|
||||
totalSize(): number;
|
||||
useUTF8(): boolean;
|
||||
}
|
||||
/**
|
||||
* 4.3.8 File data
|
||||
*
|
||||
* Immediately following the local header for a file
|
||||
* SHOULD be placed the compressed or stored data for the file.
|
||||
* If the file is encrypted, the encryption header for the file
|
||||
* SHOULD be placed after the local header and before the file
|
||||
* data. The series of [local file header][encryption header]
|
||||
* [file data][data descriptor] repeats for each file in the
|
||||
* .ZIP archive.
|
||||
*
|
||||
* Zero-byte files, directories, and other file types that
|
||||
* contain no content MUST not include file data.
|
||||
*/
|
||||
export declare class FileData {
|
||||
private header;
|
||||
private record;
|
||||
private data;
|
||||
constructor(header: FileHeader, record: CentralDirectory, data: Buffer);
|
||||
decompress(): Buffer;
|
||||
getHeader(): FileHeader;
|
||||
getRecord(): CentralDirectory;
|
||||
getRawData(): Buffer;
|
||||
}
|
||||
/**
|
||||
* 4.3.9 Data descriptor:
|
||||
*
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
*/
|
||||
export declare class DataDescriptor {
|
||||
private data;
|
||||
constructor(data: Buffer);
|
||||
crc32(): number;
|
||||
compressedSize(): number;
|
||||
uncompressedSize(): number;
|
||||
}
|
||||
/**
|
||||
* 4.3.11 Archive extra data record:
|
||||
*
|
||||
* archive extra data signature 4 bytes (0x08064b50)
|
||||
* extra field length 4 bytes
|
||||
* extra field data (variable size)
|
||||
*
|
||||
* 4.3.11.1 The Archive Extra Data Record is introduced in version 6.2
|
||||
* of the ZIP format specification. This record MAY be used in support
|
||||
* of the Central Directory Encryption Feature implemented as part of
|
||||
* the Strong Encryption Specification as described in this document.
|
||||
* When present, this record MUST immediately precede the central
|
||||
* directory data structure.
|
||||
*/
|
||||
export declare class ArchiveExtraDataRecord {
|
||||
private data;
|
||||
constructor(data: Buffer);
|
||||
length(): number;
|
||||
extraFieldData(): Buffer;
|
||||
}
|
||||
/**
|
||||
* 4.3.13 Digital signature:
|
||||
*
|
||||
* header signature 4 bytes (0x05054b50)
|
||||
* size of data 2 bytes
|
||||
* signature data (variable size)
|
||||
*
|
||||
* With the introduction of the Central Directory Encryption
|
||||
* feature in version 6.2 of this specification, the Central
|
||||
* Directory Structure MAY be stored both compressed and encrypted.
|
||||
* Although not required, it is assumed when encrypting the
|
||||
* Central Directory Structure, that it will be compressed
|
||||
* for greater storage efficiency. Information on the
|
||||
* Central Directory Encryption feature can be found in the section
|
||||
* describing the Strong Encryption Specification. The Digital
|
||||
* Signature record will be neither compressed nor encrypted.
|
||||
*/
|
||||
export declare class DigitalSignature {
|
||||
private data;
|
||||
constructor(data: Buffer);
|
||||
size(): number;
|
||||
signatureData(): Buffer;
|
||||
}
|
||||
/**
|
||||
* 4.3.12 Central directory structure:
|
||||
*
|
||||
* central file header signature 4 bytes (0x02014b50)
|
||||
* version made by 2 bytes
|
||||
* version needed to extract 2 bytes
|
||||
* general purpose bit flag 2 bytes
|
||||
* compression method 2 bytes
|
||||
* last mod file time 2 bytes
|
||||
* last mod file date 2 bytes
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
* file name length 2 bytes
|
||||
* extra field length 2 bytes
|
||||
* file comment length 2 bytes
|
||||
* disk number start 2 bytes
|
||||
* internal file attributes 2 bytes
|
||||
* external file attributes 4 bytes
|
||||
* relative offset of local header 4 bytes
|
||||
*
|
||||
* file name (variable size)
|
||||
* extra field (variable size)
|
||||
* file comment (variable size)
|
||||
*/
|
||||
export declare class CentralDirectory {
|
||||
private zipData;
|
||||
private data;
|
||||
private _filename;
|
||||
constructor(zipData: Buffer, data: Buffer);
|
||||
versionMadeBy(): number;
|
||||
versionNeeded(): number;
|
||||
flag(): number;
|
||||
compressionMethod(): CompressionMethod;
|
||||
lastModFileTime(): Date;
|
||||
rawLastModFileTime(): number;
|
||||
crc32(): number;
|
||||
compressedSize(): number;
|
||||
uncompressedSize(): number;
|
||||
fileNameLength(): number;
|
||||
extraFieldLength(): number;
|
||||
fileCommentLength(): number;
|
||||
diskNumberStart(): number;
|
||||
internalAttributes(): number;
|
||||
externalAttributes(): number;
|
||||
headerRelativeOffset(): number;
|
||||
produceFilename(): string;
|
||||
fileName(): string;
|
||||
rawFileName(): Buffer;
|
||||
extraField(): Buffer;
|
||||
fileComment(): string;
|
||||
rawFileComment(): Buffer;
|
||||
totalSize(): number;
|
||||
isDirectory(): boolean;
|
||||
isFile(): boolean;
|
||||
useUTF8(): boolean;
|
||||
isEncrypted(): boolean;
|
||||
getFileData(): FileData;
|
||||
getData(): Buffer;
|
||||
getRawData(): Buffer;
|
||||
getStats(): Stats;
|
||||
}
|
||||
/**
|
||||
* 4.3.16: end of central directory record
|
||||
* end of central dir signature 4 bytes (0x06054b50)
|
||||
* number of this disk 2 bytes
|
||||
* number of the disk with the
|
||||
* start of the central directory 2 bytes
|
||||
* total number of entries in the
|
||||
* central directory on this disk 2 bytes
|
||||
* total number of entries in
|
||||
* the central directory 2 bytes
|
||||
* size of the central directory 4 bytes
|
||||
* offset of start of central
|
||||
* directory with respect to
|
||||
* the starting disk number 4 bytes
|
||||
* .ZIP file comment length 2 bytes
|
||||
* .ZIP file comment (variable size)
|
||||
*/
|
||||
export declare class EndOfCentralDirectory {
|
||||
private data;
|
||||
constructor(data: Buffer);
|
||||
diskNumber(): number;
|
||||
cdDiskNumber(): number;
|
||||
cdDiskEntryCount(): number;
|
||||
cdTotalEntryCount(): number;
|
||||
cdSize(): number;
|
||||
cdOffset(): number;
|
||||
cdZipCommentLength(): number;
|
||||
cdZipComment(): string;
|
||||
rawCdZipComment(): Buffer;
|
||||
}
|
||||
/**
|
||||
* Contains the table of contents of a Zip file.
|
||||
*/
|
||||
export declare class ZipTOC {
|
||||
index: FileIndex<CentralDirectory>;
|
||||
directoryEntries: CentralDirectory[];
|
||||
eocd: EndOfCentralDirectory;
|
||||
data: Buffer;
|
||||
constructor(index: FileIndex<CentralDirectory>, directoryEntries: CentralDirectory[], eocd: EndOfCentralDirectory, data: Buffer);
|
||||
}
|
||||
/**
|
||||
* Configuration options for a ZipFS file system.
|
||||
*/
|
||||
export interface ZipFSOptions {
|
||||
zipData: Buffer;
|
||||
name?: string;
|
||||
}
|
||||
/**
|
||||
* Zip file-backed filesystem
|
||||
* Implemented according to the standard:
|
||||
* http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
||||
*
|
||||
* While there are a few zip libraries for JavaScript (e.g. JSZip and zip.js),
|
||||
* they are not a good match for BrowserFS. In particular, these libraries
|
||||
* perform a lot of unneeded data copying, and eagerly decompress every file
|
||||
* in the zip file upon loading to check the CRC32. They also eagerly decode
|
||||
* strings. Furthermore, these libraries duplicate functionality already present
|
||||
* in BrowserFS (e.g. UTF-8 decoding and binary data manipulation).
|
||||
*
|
||||
* This filesystem takes advantage of BrowserFS's Buffer implementation, which
|
||||
* efficiently represents the zip file in memory (in both ArrayBuffer-enabled
|
||||
* browsers *and* non-ArrayBuffer browsers), and which can neatly be 'sliced'
|
||||
* without copying data. Each struct defined in the standard is represented with
|
||||
* a buffer slice pointing to an offset in the zip file, and has getters for
|
||||
* each field. As we anticipate that this data will not be read often, we choose
|
||||
* not to store each struct field in the JavaScript object; instead, to reduce
|
||||
* memory consumption, we retrieve it directly from the binary data each time it
|
||||
* is requested.
|
||||
*
|
||||
* When the filesystem is instantiated, we determine the directory structure
|
||||
* of the zip file as quickly as possible. We lazily decompress and check the
|
||||
* CRC32 of files. We do not cache decompressed files; if this is a desired
|
||||
* feature, it is best implemented as a generic file system wrapper that can
|
||||
* cache data from arbitrary file systems.
|
||||
*
|
||||
* For inflation, we use `pako`'s implementation:
|
||||
* https://github.com/nodeca/pako
|
||||
*
|
||||
* Current limitations:
|
||||
* * No encryption.
|
||||
* * No ZIP64 support.
|
||||
* * Read-only.
|
||||
* Write support would require that we:
|
||||
* - Keep track of changed/new files.
|
||||
* - Compress changed files, and generate appropriate metadata for each.
|
||||
* - Update file offsets for other files in the zip file.
|
||||
* - Stream it out to a location.
|
||||
* This isn't that bad, so we might do this at a later date.
|
||||
*/
|
||||
export default class ZipFS extends SynchronousFileSystem implements FileSystem {
|
||||
private name;
|
||||
static readonly Name = "ZipFS";
|
||||
static readonly Options: FileSystemOptions;
|
||||
static readonly CompressionMethod: typeof CompressionMethod;
|
||||
/**
|
||||
* Constructs a ZipFS instance with the given options.
|
||||
*/
|
||||
static Create(opts: ZipFSOptions, cb: BFSCallback<ZipFS>): void;
|
||||
static isAvailable(): boolean;
|
||||
static RegisterDecompressionMethod(m: CompressionMethod, fcn: (data: Buffer, compressedSize: number, uncompressedSize: number, flags: number) => Buffer): void;
|
||||
/**
|
||||
* Locates the end of central directory record at the end of the file.
|
||||
* Throws an exception if it cannot be found.
|
||||
*/
|
||||
private static _getEOCD;
|
||||
private static _addToIndex;
|
||||
private static _computeIndex;
|
||||
private static _computeIndexResponsiveTrampoline;
|
||||
private static _computeIndexResponsive;
|
||||
private _index;
|
||||
private _directoryEntries;
|
||||
private _eocd;
|
||||
private data;
|
||||
private constructor();
|
||||
getName(): string;
|
||||
/**
|
||||
* Get the CentralDirectory object for the given path.
|
||||
*/
|
||||
getCentralDirectoryEntry(path: string): CentralDirectory;
|
||||
getCentralDirectoryEntryAt(index: number): CentralDirectory;
|
||||
getNumberOfCentralDirectoryEntries(): number;
|
||||
getEndOfCentralDirectory(): EndOfCentralDirectory | null;
|
||||
diskSpace(path: string, cb: (total: number, free: number) => void): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
statSync(path: string, isLstat: boolean): Stats;
|
||||
openSync(path: string, flags: FileFlag, mode: number): File;
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
}
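// Usage sketch (not part of the generated declaration file): mounting a zip archive
// fetched as an ArrayBuffer. The `zipData` and optional `name` fields match ZipFSOptions
// above; `zipBytes` is a hypothetical ArrayBuffer holding the archive.
//
//   const Buffer = BrowserFS.BFSRequire("buffer").Buffer;
//   BrowserFS.configure({
//     fs: "ZipFS",
//     options: { zipData: Buffer.from(zipBytes), name: "assets.zip" }
//   }, (e) => {
//     if (e) { throw e; }
//     const fs = BrowserFS.BFSRequire("fs");
//     // ZipFS is read-only and supports synchronous reads.
//     const text = fs.readFileSync("/readme.txt", "utf8");
//   });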
|
||||
835
sandpack-generated/static/browserfs11/node/backend/ZipFS.js
Normal file
@@ -0,0 +1,835 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ZipTOC = exports.EndOfCentralDirectory = exports.CentralDirectory = exports.DigitalSignature = exports.ArchiveExtraDataRecord = exports.DataDescriptor = exports.FileData = exports.FileHeader = exports.CompressionMethod = exports.ExternalFileAttributeType = void 0;
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var file_system_1 = require("../core/file_system");
|
||||
var file_flag_1 = require("../core/file_flag");
|
||||
var preload_file_1 = require("../generic/preload_file");
|
||||
var util_1 = require("../core/util");
|
||||
var extended_ascii_1 = require("../generic/extended_ascii");
|
||||
var setImmediate_1 = require("../generic/setImmediate");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var inflateRaw = require('pako/lib/inflate').inflateRaw;
|
||||
var file_index_1 = require("../generic/file_index");
|
||||
/**
|
||||
* Maps CompressionMethod => function that decompresses.
|
||||
* @hidden
|
||||
*/
|
||||
var decompressionMethods = {};
|
||||
/**
|
||||
* 4.4.2.2: Indicates the compatibility of a file's external attributes.
|
||||
*/
|
||||
var ExternalFileAttributeType;
|
||||
(function (ExternalFileAttributeType) {
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["MSDOS"] = 0] = "MSDOS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["AMIGA"] = 1] = "AMIGA";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["OPENVMS"] = 2] = "OPENVMS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["UNIX"] = 3] = "UNIX";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["VM_CMS"] = 4] = "VM_CMS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["ATARI_ST"] = 5] = "ATARI_ST";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["OS2_HPFS"] = 6] = "OS2_HPFS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["MAC"] = 7] = "MAC";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["Z_SYSTEM"] = 8] = "Z_SYSTEM";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["CP_M"] = 9] = "CP_M";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["NTFS"] = 10] = "NTFS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["MVS"] = 11] = "MVS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["VSE"] = 12] = "VSE";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["ACORN_RISC"] = 13] = "ACORN_RISC";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["VFAT"] = 14] = "VFAT";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["ALT_MVS"] = 15] = "ALT_MVS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["BEOS"] = 16] = "BEOS";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["TANDEM"] = 17] = "TANDEM";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["OS_400"] = 18] = "OS_400";
|
||||
ExternalFileAttributeType[ExternalFileAttributeType["OSX"] = 19] = "OSX";
|
||||
})(ExternalFileAttributeType || (exports.ExternalFileAttributeType = ExternalFileAttributeType = {}));
|
||||
/**
|
||||
* 4.4.5
|
||||
*/
|
||||
var CompressionMethod;
|
||||
(function (CompressionMethod) {
|
||||
CompressionMethod[CompressionMethod["STORED"] = 0] = "STORED";
|
||||
CompressionMethod[CompressionMethod["SHRUNK"] = 1] = "SHRUNK";
|
||||
CompressionMethod[CompressionMethod["REDUCED_1"] = 2] = "REDUCED_1";
|
||||
CompressionMethod[CompressionMethod["REDUCED_2"] = 3] = "REDUCED_2";
|
||||
CompressionMethod[CompressionMethod["REDUCED_3"] = 4] = "REDUCED_3";
|
||||
CompressionMethod[CompressionMethod["REDUCED_4"] = 5] = "REDUCED_4";
|
||||
CompressionMethod[CompressionMethod["IMPLODE"] = 6] = "IMPLODE";
|
||||
CompressionMethod[CompressionMethod["DEFLATE"] = 8] = "DEFLATE";
|
||||
CompressionMethod[CompressionMethod["DEFLATE64"] = 9] = "DEFLATE64";
|
||||
CompressionMethod[CompressionMethod["TERSE_OLD"] = 10] = "TERSE_OLD";
|
||||
CompressionMethod[CompressionMethod["BZIP2"] = 12] = "BZIP2";
|
||||
CompressionMethod[CompressionMethod["LZMA"] = 14] = "LZMA";
|
||||
CompressionMethod[CompressionMethod["TERSE_NEW"] = 18] = "TERSE_NEW";
|
||||
CompressionMethod[CompressionMethod["LZ77"] = 19] = "LZ77";
|
||||
CompressionMethod[CompressionMethod["WAVPACK"] = 97] = "WAVPACK";
|
||||
CompressionMethod[CompressionMethod["PPMD"] = 98] = "PPMD"; // PPMd version I, Rev 1
|
||||
})(CompressionMethod || (exports.CompressionMethod = CompressionMethod = {}));
|
||||
/**
|
||||
* Converts the input time and date in MS-DOS format into a JavaScript Date
|
||||
* object.
|
||||
* @hidden
|
||||
*/
|
||||
function msdos2date(time, date) {
|
||||
// MS-DOS Date
|
||||
// |0 0 0 0 0|0 0 0 0|0 0 0 0 0 0 0
|
||||
// D (1-31) M (1-12) Y (from 1980)
|
||||
var day = date & 0x1F;
|
||||
// JS date is 0-indexed, DOS is 1-indexed.
|
||||
var month = ((date >> 5) & 0xF) - 1;
|
||||
var year = (date >> 9) + 1980;
|
||||
// MS DOS Time
|
||||
// |0 0 0 0 0|0 0 0 0 0 0|0 0 0 0 0
|
||||
// Second Minute Hour
|
||||
var second = time & 0x1F;
|
||||
var minute = (time >> 5) & 0x3F;
|
||||
var hour = time >> 11;
|
||||
return new Date(year, month, day, hour, minute, second);
|
||||
}
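// Worked example (illustrative values): msdos2date(0x0000, 0x2821)
//   date 0x2821 = 0b0010100_0001_00001 -> day 1, month index 0 (January), year 20 + 1980 = 2000
//   time 0x0000 -> 00:00:00
//   Result: new Date(2000, 0, 1, 0, 0, 0)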
|
||||
/**
|
||||
* Safely returns the string from the buffer, even if it is 0 bytes long.
|
||||
* (Normally, calling toString() on a buffer with start === end causes an
|
||||
* exception).
|
||||
* @hidden
|
||||
*/
|
||||
function safeToString(buff, useUTF8, start, length) {
|
||||
if (length === 0) {
|
||||
return "";
|
||||
}
|
||||
else if (useUTF8) {
|
||||
return buff.toString('utf8', start, start + length);
|
||||
}
|
||||
else {
|
||||
return extended_ascii_1.default.byte2str(buff.slice(start, start + length));
|
||||
}
|
||||
}
|
||||
/*
|
||||
4.3.6 Overall .ZIP file format:
|
||||
|
||||
[local file header 1]
|
||||
[encryption header 1]
|
||||
[file data 1]
|
||||
[data descriptor 1]
|
||||
.
|
||||
.
|
||||
.
|
||||
[local file header n]
|
||||
[encryption header n]
|
||||
[file data n]
|
||||
[data descriptor n]
|
||||
[archive decryption header]
|
||||
[archive extra data record]
|
||||
[central directory header 1]
|
||||
.
|
||||
.
|
||||
.
|
||||
[central directory header n]
|
||||
[zip64 end of central directory record]
|
||||
[zip64 end of central directory locator]
|
||||
[end of central directory record]
|
||||
*/
|
||||
/**
|
||||
* 4.3.7 Local file header:
|
||||
*
|
||||
* local file header signature 4 bytes (0x04034b50)
|
||||
* version needed to extract 2 bytes
|
||||
* general purpose bit flag 2 bytes
|
||||
* compression method 2 bytes
|
||||
* last mod file time 2 bytes
|
||||
* last mod file date 2 bytes
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
* file name length 2 bytes
|
||||
* extra field length 2 bytes
|
||||
*
|
||||
* file name (variable size)
|
||||
* extra field (variable size)
|
||||
*/
|
||||
var FileHeader = /** @class */ (function () {
|
||||
function FileHeader(data) {
|
||||
this.data = data;
|
||||
if (data.readUInt32LE(0) !== 0x04034b50) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid Zip file: Local file header has invalid signature: " + this.data.readUInt32LE(0));
|
||||
}
|
||||
}
|
||||
FileHeader.prototype.versionNeeded = function () { return this.data.readUInt16LE(4); };
|
||||
FileHeader.prototype.flags = function () { return this.data.readUInt16LE(6); };
|
||||
FileHeader.prototype.compressionMethod = function () { return this.data.readUInt16LE(8); };
|
||||
FileHeader.prototype.lastModFileTime = function () {
|
||||
// Time and date is in MS-DOS format.
|
||||
return msdos2date(this.data.readUInt16LE(10), this.data.readUInt16LE(12));
|
||||
};
|
||||
FileHeader.prototype.rawLastModFileTime = function () {
|
||||
return this.data.readUInt32LE(10);
|
||||
};
|
||||
FileHeader.prototype.crc32 = function () { return this.data.readUInt32LE(14); };
|
||||
/**
|
||||
* These two values are COMPLETELY USELESS.
|
||||
*
|
||||
* Section 4.4.9:
|
||||
* If bit 3 of the general purpose bit flag is set,
|
||||
* these fields are set to zero in the local header and the
|
||||
* correct values are put in the data descriptor and
|
||||
* in the central directory.
|
||||
*
|
||||
* So we'll just use the central directory's values.
|
||||
*/
|
||||
// public compressedSize(): number { return this.data.readUInt32LE(18); }
|
||||
// public uncompressedSize(): number { return this.data.readUInt32LE(22); }
|
||||
FileHeader.prototype.fileNameLength = function () { return this.data.readUInt16LE(26); };
|
||||
FileHeader.prototype.extraFieldLength = function () { return this.data.readUInt16LE(28); };
|
||||
FileHeader.prototype.fileName = function () {
|
||||
return safeToString(this.data, this.useUTF8(), 30, this.fileNameLength());
|
||||
};
|
||||
FileHeader.prototype.extraField = function () {
|
||||
var start = 30 + this.fileNameLength();
|
||||
return this.data.slice(start, start + this.extraFieldLength());
|
||||
};
|
||||
FileHeader.prototype.totalSize = function () { return 30 + this.fileNameLength() + this.extraFieldLength(); };
|
||||
FileHeader.prototype.useUTF8 = function () { return (this.flags() & 0x800) === 0x800; };
|
||||
return FileHeader;
|
||||
}());
|
||||
exports.FileHeader = FileHeader;
|
||||
/**
|
||||
* 4.3.8 File data
|
||||
*
|
||||
* Immediately following the local header for a file
|
||||
* SHOULD be placed the compressed or stored data for the file.
|
||||
* If the file is encrypted, the encryption header for the file
|
||||
* SHOULD be placed after the local header and before the file
|
||||
* data. The series of [local file header][encryption header]
|
||||
* [file data][data descriptor] repeats for each file in the
|
||||
* .ZIP archive.
|
||||
*
|
||||
* Zero-byte files, directories, and other file types that
|
||||
* contain no content MUST not include file data.
|
||||
*/
|
||||
var FileData = /** @class */ (function () {
|
||||
function FileData(header, record, data) {
|
||||
this.header = header;
|
||||
this.record = record;
|
||||
this.data = data;
|
||||
}
|
||||
FileData.prototype.decompress = function () {
|
||||
// Check the compression
|
||||
var compressionMethod = this.header.compressionMethod();
|
||||
var fcn = decompressionMethods[compressionMethod];
|
||||
if (fcn) {
|
||||
return fcn(this.data, this.record.compressedSize(), this.record.uncompressedSize(), this.record.flag());
|
||||
}
|
||||
else {
|
||||
var name_1 = CompressionMethod[compressionMethod];
|
||||
if (!name_1) {
|
||||
name_1 = "Unknown: ".concat(compressionMethod);
|
||||
}
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid compression method on file '".concat(this.header.fileName(), "': ").concat(name_1));
|
||||
}
|
||||
};
|
||||
FileData.prototype.getHeader = function () {
|
||||
return this.header;
|
||||
};
|
||||
FileData.prototype.getRecord = function () {
|
||||
return this.record;
|
||||
};
|
||||
FileData.prototype.getRawData = function () {
|
||||
return this.data;
|
||||
};
|
||||
return FileData;
|
||||
}());
|
||||
exports.FileData = FileData;
|
||||
/**
|
||||
* 4.3.9 Data descriptor:
|
||||
*
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
*/
|
||||
var DataDescriptor = /** @class */ (function () {
|
||||
function DataDescriptor(data) {
|
||||
this.data = data;
|
||||
}
|
||||
DataDescriptor.prototype.crc32 = function () { return this.data.readUInt32LE(0); };
|
||||
DataDescriptor.prototype.compressedSize = function () { return this.data.readUInt32LE(4); };
|
||||
DataDescriptor.prototype.uncompressedSize = function () { return this.data.readUInt32LE(8); };
|
||||
return DataDescriptor;
|
||||
}());
|
||||
exports.DataDescriptor = DataDescriptor;
|
||||
/*
|
||||
4.3.10 Archive decryption header:
|
||||
|
||||
4.3.10.1 The Archive Decryption Header is introduced in version 6.2
|
||||
of the ZIP format specification. This record exists in support
|
||||
of the Central Directory Encryption Feature implemented as part of
|
||||
the Strong Encryption Specification as described in this document.
|
||||
When the Central Directory Structure is encrypted, this decryption
|
||||
header MUST precede the encrypted data segment.
|
||||
*/
|
||||
/**
|
||||
* 4.3.11 Archive extra data record:
|
||||
*
|
||||
* archive extra data signature 4 bytes (0x08064b50)
|
||||
* extra field length 4 bytes
|
||||
* extra field data (variable size)
|
||||
*
|
||||
* 4.3.11.1 The Archive Extra Data Record is introduced in version 6.2
|
||||
* of the ZIP format specification. This record MAY be used in support
|
||||
* of the Central Directory Encryption Feature implemented as part of
|
||||
* the Strong Encryption Specification as described in this document.
|
||||
* When present, this record MUST immediately precede the central
|
||||
* directory data structure.
|
||||
*/
|
||||
var ArchiveExtraDataRecord = /** @class */ (function () {
|
||||
function ArchiveExtraDataRecord(data) {
|
||||
this.data = data;
|
||||
if (this.data.readUInt32LE(0) !== 0x08064b50) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid archive extra data record signature: " + this.data.readUInt32LE(0));
|
||||
}
|
||||
}
|
||||
ArchiveExtraDataRecord.prototype.length = function () { return this.data.readUInt32LE(4); };
|
||||
ArchiveExtraDataRecord.prototype.extraFieldData = function () { return this.data.slice(8, 8 + this.length()); };
|
||||
return ArchiveExtraDataRecord;
|
||||
}());
|
||||
exports.ArchiveExtraDataRecord = ArchiveExtraDataRecord;
|
||||
/**
|
||||
* 4.3.13 Digital signature:
|
||||
*
|
||||
* header signature 4 bytes (0x05054b50)
|
||||
* size of data 2 bytes
|
||||
* signature data (variable size)
|
||||
*
|
||||
* With the introduction of the Central Directory Encryption
|
||||
* feature in version 6.2 of this specification, the Central
|
||||
* Directory Structure MAY be stored both compressed and encrypted.
|
||||
* Although not required, it is assumed when encrypting the
|
||||
* Central Directory Structure, that it will be compressed
|
||||
* for greater storage efficiency. Information on the
|
||||
* Central Directory Encryption feature can be found in the section
|
||||
* describing the Strong Encryption Specification. The Digital
|
||||
* Signature record will be neither compressed nor encrypted.
|
||||
*/
|
||||
var DigitalSignature = /** @class */ (function () {
|
||||
function DigitalSignature(data) {
|
||||
this.data = data;
|
||||
if (this.data.readUInt32LE(0) !== 0x05054b50) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid digital signature signature: " + this.data.readUInt32LE(0));
|
||||
}
|
||||
}
|
||||
DigitalSignature.prototype.size = function () { return this.data.readUInt16LE(4); };
|
||||
DigitalSignature.prototype.signatureData = function () { return this.data.slice(6, 6 + this.size()); };
|
||||
return DigitalSignature;
|
||||
}());
|
||||
exports.DigitalSignature = DigitalSignature;
|
||||
/**
|
||||
* 4.3.12 Central directory structure:
|
||||
*
|
||||
* central file header signature 4 bytes (0x02014b50)
|
||||
* version made by 2 bytes
|
||||
* version needed to extract 2 bytes
|
||||
* general purpose bit flag 2 bytes
|
||||
* compression method 2 bytes
|
||||
* last mod file time 2 bytes
|
||||
* last mod file date 2 bytes
|
||||
* crc-32 4 bytes
|
||||
* compressed size 4 bytes
|
||||
* uncompressed size 4 bytes
|
||||
* file name length 2 bytes
|
||||
* extra field length 2 bytes
|
||||
* file comment length 2 bytes
|
||||
* disk number start 2 bytes
|
||||
* internal file attributes 2 bytes
|
||||
* external file attributes 4 bytes
|
||||
* relative offset of local header 4 bytes
|
||||
*
|
||||
* file name (variable size)
|
||||
* extra field (variable size)
|
||||
* file comment (variable size)
|
||||
*/
|
||||
var CentralDirectory = /** @class */ (function () {
|
||||
function CentralDirectory(zipData, data) {
|
||||
this.zipData = zipData;
|
||||
this.data = data;
|
||||
// Sanity check.
|
||||
if (this.data.readUInt32LE(0) !== 0x02014b50) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid Zip file: Central directory record has invalid signature: ".concat(this.data.readUInt32LE(0)));
|
||||
}
|
||||
this._filename = this.produceFilename();
|
||||
}
|
||||
CentralDirectory.prototype.versionMadeBy = function () { return this.data.readUInt16LE(4); };
|
||||
CentralDirectory.prototype.versionNeeded = function () { return this.data.readUInt16LE(6); };
|
||||
CentralDirectory.prototype.flag = function () { return this.data.readUInt16LE(8); };
|
||||
CentralDirectory.prototype.compressionMethod = function () { return this.data.readUInt16LE(10); };
|
||||
CentralDirectory.prototype.lastModFileTime = function () {
|
||||
// Time and date is in MS-DOS format.
|
||||
return msdos2date(this.data.readUInt16LE(12), this.data.readUInt16LE(14));
|
||||
};
|
||||
CentralDirectory.prototype.rawLastModFileTime = function () {
|
||||
return this.data.readUInt32LE(12);
|
||||
};
|
||||
CentralDirectory.prototype.crc32 = function () { return this.data.readUInt32LE(16); };
|
||||
CentralDirectory.prototype.compressedSize = function () { return this.data.readUInt32LE(20); };
|
||||
CentralDirectory.prototype.uncompressedSize = function () { return this.data.readUInt32LE(24); };
|
||||
CentralDirectory.prototype.fileNameLength = function () { return this.data.readUInt16LE(28); };
|
||||
CentralDirectory.prototype.extraFieldLength = function () { return this.data.readUInt16LE(30); };
|
||||
CentralDirectory.prototype.fileCommentLength = function () { return this.data.readUInt16LE(32); };
|
||||
CentralDirectory.prototype.diskNumberStart = function () { return this.data.readUInt16LE(34); };
|
||||
CentralDirectory.prototype.internalAttributes = function () { return this.data.readUInt16LE(36); };
|
||||
CentralDirectory.prototype.externalAttributes = function () { return this.data.readUInt32LE(38); };
|
||||
CentralDirectory.prototype.headerRelativeOffset = function () { return this.data.readUInt32LE(42); };
|
||||
CentralDirectory.prototype.produceFilename = function () {
|
||||
/*
|
||||
4.4.17.1 claims:
|
||||
* All slashes are forward ('/') slashes.
|
||||
* Filename doesn't begin with a slash.
|
||||
* No drive letters or any nonsense like that.
|
||||
* If filename is missing, the input came from standard input.
|
||||
|
||||
Unfortunately, this isn't true in practice. Some Windows zip utilities use
|
||||
a backslash here, but store the correct Unix-style path in the file headers.
|
||||
|
||||
To avoid seeking all over the file to recover the known-good filenames
|
||||
from file headers, we simply convert '\' to '/' here.
|
||||
*/
|
||||
var fileName = safeToString(this.data, this.useUTF8(), 46, this.fileNameLength());
|
||||
return fileName.replace(/\\/g, "/");
|
||||
};
|
||||
CentralDirectory.prototype.fileName = function () {
|
||||
return this._filename;
|
||||
};
|
||||
CentralDirectory.prototype.rawFileName = function () {
|
||||
return this.data.slice(46, 46 + this.fileNameLength());
|
||||
};
|
||||
CentralDirectory.prototype.extraField = function () {
|
||||
var start = 46 + this.fileNameLength(); // the fixed portion of a central directory record is 46 bytes
|
||||
return this.data.slice(start, start + this.extraFieldLength());
|
||||
};
|
||||
CentralDirectory.prototype.fileComment = function () {
|
||||
var start = 46 + this.fileNameLength() + this.extraFieldLength();
|
||||
return safeToString(this.data, this.useUTF8(), start, this.fileCommentLength());
|
||||
};
|
||||
CentralDirectory.prototype.rawFileComment = function () {
|
||||
var start = 46 + this.fileNameLength() + this.extraFieldLength();
|
||||
return this.data.slice(start, start + this.fileCommentLength());
|
||||
};
|
||||
CentralDirectory.prototype.totalSize = function () {
|
||||
return 46 + this.fileNameLength() + this.extraFieldLength() + this.fileCommentLength();
|
||||
};
|
||||
CentralDirectory.prototype.isDirectory = function () {
|
||||
// NOTE: This assumes that the zip file implementation uses the lower byte
|
||||
// of external attributes for DOS attributes for
|
||||
// backwards-compatibility. This is not mandated, but appears to be
|
||||
// commonplace.
|
||||
// According to the spec, the layout of external attributes is
|
||||
// platform-dependent.
|
||||
// If that fails, we also check if the name of the file ends in '/',
|
||||
// which is what Java's ZipFile implementation does.
|
||||
var fileName = this.fileName();
|
||||
return (this.externalAttributes() & 0x10 ? true : false) || (fileName.charAt(fileName.length - 1) === '/');
|
||||
};
|
||||
CentralDirectory.prototype.isFile = function () { return !this.isDirectory(); };
|
||||
CentralDirectory.prototype.useUTF8 = function () { return (this.flag() & 0x800) === 0x800; };
|
||||
CentralDirectory.prototype.isEncrypted = function () { return (this.flag() & 0x1) === 0x1; };
|
||||
CentralDirectory.prototype.getFileData = function () {
|
||||
// Need to grab the header before we can figure out where the actual
|
||||
// compressed data starts.
|
||||
var start = this.headerRelativeOffset();
|
||||
var header = new FileHeader(this.zipData.slice(start));
|
||||
return new FileData(header, this, this.zipData.slice(start + header.totalSize()));
|
||||
};
|
||||
CentralDirectory.prototype.getData = function () {
|
||||
return this.getFileData().decompress();
|
||||
};
|
||||
CentralDirectory.prototype.getRawData = function () {
|
||||
return this.getFileData().getRawData();
|
||||
};
|
||||
CentralDirectory.prototype.getStats = function () {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, this.uncompressedSize(), 0x16D, Date.now(), this.lastModFileTime().getTime());
|
||||
};
|
||||
return CentralDirectory;
|
||||
}());
|
||||
exports.CentralDirectory = CentralDirectory;
|
||||
/**
|
||||
* 4.3.16: end of central directory record
|
||||
* end of central dir signature 4 bytes (0x06054b50)
|
||||
* number of this disk 2 bytes
|
||||
* number of the disk with the
|
||||
* start of the central directory 2 bytes
|
||||
* total number of entries in the
|
||||
* central directory on this disk 2 bytes
|
||||
* total number of entries in
|
||||
* the central directory 2 bytes
|
||||
* size of the central directory 4 bytes
|
||||
* offset of start of central
|
||||
* directory with respect to
|
||||
* the starting disk number 4 bytes
|
||||
* .ZIP file comment length 2 bytes
|
||||
* .ZIP file comment (variable size)
|
||||
*/
|
||||
var EndOfCentralDirectory = /** @class */ (function () {
|
||||
function EndOfCentralDirectory(data) {
|
||||
this.data = data;
|
||||
if (this.data.readUInt32LE(0) !== 0x06054b50) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid Zip file: End of central directory record has invalid signature: ".concat(this.data.readUInt32LE(0)));
|
||||
}
|
||||
}
|
||||
EndOfCentralDirectory.prototype.diskNumber = function () { return this.data.readUInt16LE(4); };
|
||||
EndOfCentralDirectory.prototype.cdDiskNumber = function () { return this.data.readUInt16LE(6); };
|
||||
EndOfCentralDirectory.prototype.cdDiskEntryCount = function () { return this.data.readUInt16LE(8); };
|
||||
EndOfCentralDirectory.prototype.cdTotalEntryCount = function () { return this.data.readUInt16LE(10); };
|
||||
EndOfCentralDirectory.prototype.cdSize = function () { return this.data.readUInt32LE(12); };
|
||||
EndOfCentralDirectory.prototype.cdOffset = function () { return this.data.readUInt32LE(16); };
|
||||
EndOfCentralDirectory.prototype.cdZipCommentLength = function () { return this.data.readUInt16LE(20); };
|
||||
EndOfCentralDirectory.prototype.cdZipComment = function () {
|
||||
// Assuming UTF-8. The specification doesn't specify.
|
||||
return safeToString(this.data, true, 22, this.cdZipCommentLength());
|
||||
};
|
||||
EndOfCentralDirectory.prototype.rawCdZipComment = function () {
|
||||
return this.data.slice(22, 22 + this.cdZipCommentLength());
|
||||
};
|
||||
return EndOfCentralDirectory;
|
||||
}());
|
||||
exports.EndOfCentralDirectory = EndOfCentralDirectory;
|
||||
/**
|
||||
* Contains the table of contents of a Zip file.
|
||||
*/
|
||||
var ZipTOC = /** @class */ (function () {
|
||||
function ZipTOC(index, directoryEntries, eocd, data) {
|
||||
this.index = index;
|
||||
this.directoryEntries = directoryEntries;
|
||||
this.eocd = eocd;
|
||||
this.data = data;
|
||||
}
|
||||
return ZipTOC;
|
||||
}());
|
||||
exports.ZipTOC = ZipTOC;
|
||||
/**
|
||||
* Zip file-backed filesystem
|
||||
* Implemented according to the standard:
|
||||
* http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
||||
*
|
||||
* While there are a few zip libraries for JavaScript (e.g. JSZip and zip.js),
|
||||
* they are not a good match for BrowserFS. In particular, these libraries
|
||||
* perform a lot of unneeded data copying, and eagerly decompress every file
|
||||
* in the zip file upon loading to check the CRC32. They also eagerly decode
|
||||
* strings. Furthermore, these libraries duplicate functionality already present
|
||||
* in BrowserFS (e.g. UTF-8 decoding and binary data manipulation).
|
||||
*
|
||||
* This filesystem takes advantage of BrowserFS's Buffer implementation, which
|
||||
* efficiently represents the zip file in memory (in both ArrayBuffer-enabled
|
||||
* browsers *and* non-ArrayBuffer browsers), and which can neatly be 'sliced'
|
||||
* without copying data. Each struct defined in the standard is represented with
|
||||
* a buffer slice pointing to an offset in the zip file, and has getters for
|
||||
* each field. As we anticipate that this data will not be read often, we choose
|
||||
* not to store each struct field in the JavaScript object; instead, to reduce
|
||||
* memory consumption, we retrieve it directly from the binary data each time it
|
||||
* is requested.
|
||||
*
|
||||
* When the filesystem is instantiated, we determine the directory structure
|
||||
* of the zip file as quickly as possible. We lazily decompress and check the
|
||||
* CRC32 of files. We do not cache decompressed files; if this is a desired
|
||||
* feature, it is best implemented as a generic file system wrapper that can
|
||||
* cache data from arbitrary file systems.
|
||||
*
|
||||
* For inflation, we use `pako`'s implementation:
|
||||
* https://github.com/nodeca/pako
|
||||
*
|
||||
* Current limitations:
|
||||
* * No encryption.
|
||||
* * No ZIP64 support.
|
||||
* * Read-only.
|
||||
* Write support would require that we:
|
||||
* - Keep track of changed/new files.
|
||||
* - Compress changed files, and generate appropriate metadata for each.
|
||||
* - Update file offsets for other files in the zip file.
|
||||
* - Stream it out to a location.
|
||||
* This isn't that bad, so we might do this at a later date.
|
||||
*/
|
||||
var ZipFS = /** @class */ (function (_super) {
|
||||
__extends(ZipFS, _super);
|
||||
function ZipFS(input, name) {
|
||||
if (name === void 0) { name = ''; }
|
||||
var _this = _super.call(this) || this;
|
||||
_this.name = name;
|
||||
_this._index = new file_index_1.FileIndex();
|
||||
_this._directoryEntries = [];
|
||||
_this._eocd = null;
|
||||
_this._index = input.index;
|
||||
_this._directoryEntries = input.directoryEntries;
|
||||
_this._eocd = input.eocd;
|
||||
_this.data = input.data;
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* Constructs a ZipFS instance with the given options.
|
||||
*/
|
||||
ZipFS.Create = function (opts, cb) {
|
||||
try {
|
||||
ZipFS._computeIndex(opts.zipData, function (e, zipTOC) {
|
||||
if (zipTOC) {
|
||||
var fs = new ZipFS(zipTOC, opts.name);
|
||||
cb(null, fs);
|
||||
}
|
||||
else {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
ZipFS.isAvailable = function () { return true; };
|
||||
ZipFS.RegisterDecompressionMethod = function (m, fcn) {
|
||||
decompressionMethods[m] = fcn;
|
||||
};
|
||||
/**
|
||||
* Locates the end of central directory record at the end of the file.
|
||||
* Throws an exception if it cannot be found.
|
||||
*/
|
||||
ZipFS._getEOCD = function (data) {
|
||||
// Unfortunately, the comment is variable size and up to 64K in size.
|
||||
// We assume that the magic signature does not appear in the comment, and
|
||||
// in the bytes between the comment and the signature. Other ZIP
|
||||
// implementations make this same assumption, since the alternative is to
|
||||
// read through every entry in the file to get to it. :(
|
||||
// These are *negative* offsets from the end of the file.
|
||||
var startOffset = 22;
|
||||
var endOffset = Math.min(startOffset + 0xFFFF, data.length - 1);
|
||||
// There's not even a byte alignment guarantee on the comment so we need to
|
||||
// search byte by byte. *grumble grumble*
|
||||
for (var i = startOffset; i < endOffset; i++) {
|
||||
// Magic number: EOCD Signature
|
||||
if (data.readUInt32LE(data.length - i) === 0x06054b50) {
|
||||
return new EndOfCentralDirectory(data.slice(data.length - i));
|
||||
}
|
||||
}
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid ZIP file: Could not locate End of Central Directory signature.");
|
||||
};
|
||||
ZipFS._addToIndex = function (cd, index) {
|
||||
// Paths must be absolute, yet zip file paths are always relative to the
|
||||
// zip root. So we prepend '/' and call it a day.
|
||||
var filename = cd.fileName();
|
||||
if (filename.charAt(0) === '/') {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, "Unexpectedly encountered an absolute path in a zip file. Please file a bug.");
|
||||
}
|
||||
// XXX: For the file index, strip the trailing '/'.
|
||||
if (filename.charAt(filename.length - 1) === '/') {
|
||||
filename = filename.substr(0, filename.length - 1);
|
||||
}
|
||||
if (cd.isDirectory()) {
|
||||
index.addPathFast('/' + filename, new file_index_1.DirInode(cd));
|
||||
}
|
||||
else {
|
||||
index.addPathFast('/' + filename, new file_index_1.FileInode(cd));
|
||||
}
|
||||
};
|
||||
ZipFS._computeIndex = function (data, cb) {
|
||||
try {
|
||||
var index = new file_index_1.FileIndex();
|
||||
var eocd = ZipFS._getEOCD(data);
|
||||
if (eocd.diskNumber() !== eocd.cdDiskNumber()) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "ZipFS does not support spanned zip files."));
|
||||
}
|
||||
var cdPtr = eocd.cdOffset();
|
||||
if (cdPtr === 0xFFFFFFFF) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "ZipFS does not support Zip64."));
|
||||
}
|
||||
var cdEnd = cdPtr + eocd.cdSize();
|
||||
ZipFS._computeIndexResponsive(data, index, cdPtr, cdEnd, cb, [], eocd);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
ZipFS._computeIndexResponsiveTrampoline = function (data, index, cdPtr, cdEnd, cb, cdEntries, eocd) {
|
||||
try {
|
||||
ZipFS._computeIndexResponsive(data, index, cdPtr, cdEnd, cb, cdEntries, eocd);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
ZipFS._computeIndexResponsive = function (data, index, cdPtr, cdEnd, cb, cdEntries, eocd) {
|
||||
if (cdPtr < cdEnd) {
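// Parse the central directory in batches of up to 200 entries, then yield to the
// event loop via setImmediate so indexing a large archive stays responsive.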
|
||||
var count = 0;
|
||||
while (count++ < 200 && cdPtr < cdEnd) {
|
||||
var cd = new CentralDirectory(data, data.slice(cdPtr));
|
||||
ZipFS._addToIndex(cd, index);
|
||||
cdPtr += cd.totalSize();
|
||||
cdEntries.push(cd);
|
||||
}
|
||||
(0, setImmediate_1.default)(function () {
|
||||
ZipFS._computeIndexResponsiveTrampoline(data, index, cdPtr, cdEnd, cb, cdEntries, eocd);
|
||||
});
|
||||
}
|
||||
else {
|
||||
cb(null, new ZipTOC(index, cdEntries, eocd, data));
|
||||
}
|
||||
};
|
||||
ZipFS.prototype.getName = function () {
|
||||
return ZipFS.Name + (this.name !== '' ? " ".concat(this.name) : '');
|
||||
};
|
||||
/**
|
||||
* Get the CentralDirectory object for the given path.
|
||||
*/
|
||||
ZipFS.prototype.getCentralDirectoryEntry = function (path) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
return inode.getData();
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
return inode.getData();
|
||||
}
|
||||
else {
|
||||
// Should never occur.
|
||||
throw api_error_1.ApiError.EPERM("Invalid inode: ".concat(inode));
|
||||
}
|
||||
};
|
||||
ZipFS.prototype.getCentralDirectoryEntryAt = function (index) {
|
||||
var dirEntry = this._directoryEntries[index];
|
||||
if (!dirEntry) {
|
||||
throw new RangeError("Invalid directory index: ".concat(index, "."));
|
||||
}
|
||||
return dirEntry;
|
||||
};
|
||||
ZipFS.prototype.getNumberOfCentralDirectoryEntries = function () {
|
||||
return this._directoryEntries.length;
|
||||
};
|
||||
ZipFS.prototype.getEndOfCentralDirectory = function () {
|
||||
return this._eocd;
|
||||
};
|
||||
ZipFS.prototype.diskSpace = function (path, cb) {
|
||||
// Read-only file system.
|
||||
cb(this.data.length, 0);
|
||||
};
|
||||
ZipFS.prototype.isReadOnly = function () {
|
||||
return true;
|
||||
};
|
||||
ZipFS.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
ZipFS.prototype.supportsProps = function () {
|
||||
return false;
|
||||
};
|
||||
ZipFS.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
ZipFS.prototype.statSync = function (path, isLstat) {
|
||||
var inode = this._index.getInode(path);
|
||||
if (inode === null) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
var stats;
|
||||
if ((0, file_index_1.isFileInode)(inode)) {
|
||||
stats = inode.getData().getStats();
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
stats = inode.getStats();
|
||||
}
|
||||
else {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid inode.");
|
||||
}
|
||||
return stats;
|
||||
};
|
||||
ZipFS.prototype.openSync = function (path, flags, mode) {
|
||||
// INVARIANT: Cannot write to RO file systems.
|
||||
if (flags.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, path);
|
||||
}
|
||||
// Check if the path exists, and is a file.
|
||||
var inode = this._index.getInode(path);
|
||||
if (!inode) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
else if ((0, file_index_1.isFileInode)(inode)) {
|
||||
var cdRecord = inode.getData();
|
||||
var stats = cdRecord.getStats();
|
||||
switch (flags.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
throw api_error_1.ApiError.EEXIST(path);
|
||||
case file_flag_1.ActionType.NOP:
|
||||
return new preload_file_1.NoSyncFile(this, path, flags, stats, cdRecord.getData());
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileMode object.');
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.EISDIR(path);
|
||||
}
|
||||
};
|
||||
ZipFS.prototype.readdirSync = function (path) {
|
||||
// Check if it exists.
|
||||
var inode = this._index.getInode(path);
|
||||
if (!inode) {
|
||||
throw api_error_1.ApiError.ENOENT(path);
|
||||
}
|
||||
else if ((0, file_index_1.isDirInode)(inode)) {
|
||||
return inode.getListing();
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.ENOTDIR(path);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Specially-optimized readfile.
|
||||
*/
|
||||
ZipFS.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var fdCast = fd;
|
||||
var fdBuff = fdCast.getBuffer();
|
||||
if (encoding === null) {
|
||||
return (0, util_1.copyingSlice)(fdBuff);
|
||||
}
|
||||
return fdBuff.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
ZipFS.Name = "ZipFS";
|
||||
ZipFS.Options = {
|
||||
zipData: {
|
||||
type: "object",
|
||||
description: "The zip file as a Buffer object.",
|
||||
validator: util_1.bufferValidator
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
optional: true,
|
||||
description: "The name of the zip file (optional)."
|
||||
}
|
||||
};
|
||||
ZipFS.CompressionMethod = CompressionMethod;
|
||||
return ZipFS;
|
||||
}(file_system_1.SynchronousFileSystem));
|
||||
exports.default = ZipFS;
|
||||
ZipFS.RegisterDecompressionMethod(CompressionMethod.DEFLATE, function (data, compressedSize, uncompressedSize) {
|
||||
return (0, util_1.arrayish2Buffer)(inflateRaw(data.slice(0, compressedSize), { chunkSize: uncompressedSize }));
|
||||
});
|
||||
ZipFS.RegisterDecompressionMethod(CompressionMethod.STORED, function (data, compressedSize, uncompressedSize) {
|
||||
return (0, util_1.copyingSlice)(data, 0, uncompressedSize);
|
||||
});
|
||||
//# sourceMappingURL=ZipFS.js.map
|
||||
701
sandpack-generated/static/browserfs11/node/core/FS.d.ts
vendored
Normal file
@@ -0,0 +1,701 @@
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
import * as _fs from 'fs';
|
||||
import { ApiError } from './api_error';
|
||||
import { FileSystem, BFSOneArgCallback, BFSCallback, BFSThreeArgCallback } from './file_system';
|
||||
import Stats from './node_fs_stats';
|
||||
/**
|
||||
* The node frontend to all filesystems.
|
||||
* This layer handles:
|
||||
*
|
||||
* * Sanity checking inputs.
|
||||
* * Normalizing paths.
|
||||
* * Resetting stack depth for asynchronous operations which may not go through
|
||||
* the browser by wrapping all input callbacks using `setImmediate`.
|
||||
* * Performing the requested operation through the filesystem or the file
|
||||
* descriptor, as appropriate.
|
||||
* * Handling optional arguments and setting default arguments.
|
||||
* @see http://nodejs.org/api/fs.html
|
||||
*/
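// Usage sketch (not part of the generated declaration file): once BrowserFS.configure()
// has initialized a root file system, the object returned by BrowserFS.BFSRequire("fs")
// exposes this API and mirrors Node's fs module. The path and contents are illustrative.
//
//   BrowserFS.configure({ fs: "InMemory" }, function (e) {
//     if (e) { throw e; }
//     var fs = BrowserFS.BFSRequire("fs");
//     fs.writeFile("/hello.txt", "hi", function (err) {
//       fs.readFile("/hello.txt", "utf8", function (err, data) {
//         console.log(data); // "hi"
//       });
//     });
//   });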
|
||||
export default class FS {
|
||||
static Stats: typeof Stats;
|
||||
static F_OK: number;
|
||||
static R_OK: number;
|
||||
static W_OK: number;
|
||||
static X_OK: number;
|
||||
private root;
|
||||
private fdMap;
|
||||
private nextFd;
|
||||
private fileWatcher;
|
||||
initialize(rootFS: FileSystem): FileSystem;
|
||||
/**
|
||||
* converts Date or number to a fractional UNIX timestamp
|
||||
* Grabbed from NodeJS sources (lib/fs.js)
|
||||
*/
|
||||
_toUnixTimestamp(time: Date | number): number;
|
||||
/**
|
||||
* **NONSTANDARD**: Grab the FileSystem instance that backs this API.
|
||||
* @return [BrowserFS.FileSystem | null] Returns null if the file system has
|
||||
* not been initialized.
|
||||
*/
|
||||
getRootFS(): FileSystem | null;
|
||||
/**
|
||||
* Asynchronous rename. No arguments other than a possible exception are given
|
||||
* to the completion callback.
|
||||
* @param oldPath
|
||||
* @param newPath
|
||||
* @param callback
|
||||
*/
|
||||
rename(oldPath: string, newPath: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous rename.
|
||||
* @param oldPath
|
||||
* @param newPath
|
||||
*/
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
/**
|
||||
* Test whether or not the given path exists by checking with the file system.
|
||||
* Then call the callback argument with either true or false.
|
||||
* @example Sample invocation
|
||||
* fs.exists('/etc/passwd', function (exists) {
|
||||
* util.debug(exists ? "it's there" : "no passwd!");
|
||||
* });
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
exists(path: string, cb?: (exists: boolean) => any): void;
|
||||
/**
|
||||
* Test whether or not the given path exists by checking with the file system.
|
||||
* @param path
|
||||
* @return [boolean]
|
||||
*/
|
||||
existsSync(path: string): boolean;
|
||||
/**
|
||||
* Asynchronous `stat`.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
stat(path: string, cb?: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* Synchronous `stat`.
|
||||
* @param path
|
||||
* @return [BrowserFS.node.fs.Stats]
|
||||
*/
|
||||
statSync(path: string): Stats;
|
||||
/**
|
||||
* Asynchronous `lstat`.
|
||||
* `lstat()` is identical to `stat()`, except that if path is a symbolic link,
|
||||
* then the link itself is stat-ed, not the file that it refers to.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
lstat(path: string, cb?: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* Synchronous `lstat`.
|
||||
* `lstat()` is identical to `stat()`, except that if path is a symbolic link,
|
||||
* then the link itself is stat-ed, not the file that it refers to.
|
||||
* @param path
|
||||
* @return [BrowserFS.node.fs.Stats]
|
||||
*/
|
||||
lstatSync(path: string): Stats;
|
||||
/**
|
||||
* Asynchronous `truncate`.
|
||||
* @param path
|
||||
* @param len
|
||||
* @param callback
|
||||
*/
|
||||
truncate(path: string, cb?: BFSOneArgCallback): void;
|
||||
truncate(path: string, len: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `truncate`.
|
||||
* @param path
|
||||
* @param len
|
||||
*/
|
||||
truncateSync(path: string, len?: number): void;
|
||||
/**
|
||||
* Asynchronous `unlink`.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
unlink(path: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `unlink`.
|
||||
* @param path
|
||||
*/
|
||||
unlinkSync(path: string): void;
|
||||
/**
|
||||
* Asynchronous file open.
|
||||
* Exclusive mode ensures that path is newly created.
|
||||
*
|
||||
* `flags` can be:
|
||||
*
|
||||
* * `'r'` - Open file for reading. An exception occurs if the file does not exist.
|
||||
* * `'r+'` - Open file for reading and writing. An exception occurs if the file does not exist.
|
||||
* * `'rs'` - Open file for reading in synchronous mode. Instructs the filesystem to not cache writes.
|
||||
* * `'rs+'` - Open file for reading and writing, and opens the file in synchronous mode.
|
||||
* * `'w'` - Open file for writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx'` - Like 'w' but opens the file in exclusive mode.
|
||||
* * `'w+'` - Open file for reading and writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx+'` - Like 'w+' but opens the file in exclusive mode.
|
||||
* * `'a'` - Open file for appending. The file is created if it does not exist.
|
||||
* * `'ax'` - Like 'a' but opens the file in exclusive mode.
|
||||
* * `'a+'` - Open file for reading and appending. The file is created if it does not exist.
|
||||
* * `'ax+'` - Like 'a+' but opens the file in exclusive mode.
|
||||
*
|
||||
* @see http://www.manpagez.com/man/2/open/
|
||||
* @param path
|
||||
* @param flags
|
||||
* @param mode defaults to `0644`
|
||||
* @param callback
|
||||
*/
|
||||
open(path: string, flag: string, cb?: BFSCallback<number>): void;
|
||||
open(path: string, flag: string, mode: number | string, cb?: BFSCallback<number>): void;
|
||||
/**
|
||||
* Synchronous file open.
|
||||
* @see http://www.manpagez.com/man/2/open/
|
||||
* @param path
|
||||
* @param flags
|
||||
* @param mode defaults to `0644`
|
||||
* @return [BrowserFS.File]
|
||||
*/
|
||||
openSync(path: string, flag: string, mode?: number | string): number;
|
||||
/**
|
||||
* Asynchronously reads the entire contents of a file.
|
||||
* @example Usage example
|
||||
* fs.readFile('/etc/passwd', function (err, data) {
|
||||
* if (err) throw err;
|
||||
* console.log(data);
|
||||
* });
|
||||
* @param filename
|
||||
* @param options
|
||||
* @option options [String] encoding The string encoding for the file contents. Defaults to `null`.
|
||||
* @option options [String] flag Defaults to `'r'`.
|
||||
* @param callback If no encoding is specified, then the raw buffer is returned.
|
||||
*/
|
||||
readFile(filename: string, cb: BFSCallback<Buffer>): void;
|
||||
readFile(filename: string, options: {
|
||||
flag?: string;
|
||||
}, callback?: BFSCallback<Buffer>): void;
|
||||
readFile(filename: string, options: {
|
||||
encoding: string;
|
||||
flag?: string;
|
||||
}, callback?: BFSCallback<string>): void;
|
||||
readFile(filename: string, encoding: string, cb: BFSCallback<string>): void;
|
||||
/**
|
||||
* Synchronously reads the entire contents of a file.
|
||||
* @param filename
|
||||
* @param options
|
||||
* @option options [String] encoding The string encoding for the file contents. Defaults to `null`.
|
||||
* @option options [String] flag Defaults to `'r'`.
|
||||
* @return [String | BrowserFS.node.Buffer]
|
||||
*/
|
||||
readFileSync(filename: string, options?: {
|
||||
flag?: string;
|
||||
}): Buffer;
|
||||
readFileSync(filename: string, options: {
|
||||
encoding: string;
|
||||
flag?: string;
|
||||
}): string;
|
||||
readFileSync(filename: string, encoding: string): string;
|
||||
/**
|
||||
* Asynchronously writes data to a file, replacing the file if it already
|
||||
* exists.
|
||||
*
|
||||
* The encoding option is ignored if data is a buffer.
|
||||
*
|
||||
* @example Usage example
|
||||
* fs.writeFile('message.txt', 'Hello Node', function (err) {
|
||||
* if (err) throw err;
|
||||
* console.log('It\'s saved!');
|
||||
* });
|
||||
* @param filename
|
||||
* @param data
|
||||
* @param options
|
||||
* @option options [String] encoding Defaults to `'utf8'`.
|
||||
* @option options [Number] mode Defaults to `0644`.
|
||||
* @option options [String] flag Defaults to `'w'`.
|
||||
* @param callback
|
||||
*/
|
||||
writeFile(filename: string, data: any, cb?: BFSOneArgCallback): void;
|
||||
writeFile(filename: string, data: any, encoding?: string, cb?: BFSOneArgCallback): void;
|
||||
writeFile(filename: string, data: any, options?: {
|
||||
encoding?: string;
|
||||
mode?: string | number;
|
||||
flag?: string;
|
||||
}, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronously writes data to a file, replacing the file if it already
|
||||
* exists.
|
||||
*
|
||||
* The encoding option is ignored if data is a buffer.
|
||||
* @param filename
|
||||
* @param data
|
||||
* @param options
|
||||
* @option options [String] encoding Defaults to `'utf8'`.
|
||||
* @option options [Number] mode Defaults to `0644`.
|
||||
* @option options [String] flag Defaults to `'w'`.
|
||||
*/
|
||||
writeFileSync(filename: string, data: any, options?: {
|
||||
encoding?: string;
|
||||
mode?: number | string;
|
||||
flag?: string;
|
||||
}): void;
|
||||
writeFileSync(filename: string, data: any, encoding?: string): void;
|
||||
/**
|
||||
     * Asynchronously append data to a file, creating the file if it does not yet
|
||||
     * exist.
|
||||
*
|
||||
* @example Usage example
|
||||
* fs.appendFile('message.txt', 'data to append', function (err) {
|
||||
* if (err) throw err;
|
||||
* console.log('The "data to append" was appended to file!');
|
||||
* });
|
||||
* @param filename
|
||||
* @param data
|
||||
* @param options
|
||||
* @option options [String] encoding Defaults to `'utf8'`.
|
||||
* @option options [Number] mode Defaults to `0644`.
|
||||
* @option options [String] flag Defaults to `'a'`.
|
||||
* @param callback
|
||||
*/
|
||||
appendFile(filename: string, data: any, cb?: BFSOneArgCallback): void;
|
||||
appendFile(filename: string, data: any, options?: {
|
||||
encoding?: string;
|
||||
mode?: number | string;
|
||||
flag?: string;
|
||||
}, cb?: BFSOneArgCallback): void;
|
||||
appendFile(filename: string, data: any, encoding?: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
     * Synchronously append data to a file, creating the file if it does not yet
|
||||
     * exist.
|
||||
*
|
||||
* @example Usage example
|
||||
* fs.appendFile('message.txt', 'data to append', function (err) {
|
||||
* if (err) throw err;
|
||||
* console.log('The "data to append" was appended to file!');
|
||||
* });
|
||||
* @param filename
|
||||
* @param data
|
||||
* @param options
|
||||
* @option options [String] encoding Defaults to `'utf8'`.
|
||||
* @option options [Number] mode Defaults to `0644`.
|
||||
* @option options [String] flag Defaults to `'a'`.
|
||||
*/
|
||||
appendFileSync(filename: string, data: any, options?: {
|
||||
encoding?: string;
|
||||
mode?: number | string;
|
||||
flag?: string;
|
||||
}): void;
|
||||
appendFileSync(filename: string, data: any, encoding?: string): void;
|
||||
/**
|
||||
* Asynchronous `fstat`.
|
||||
* `fstat()` is identical to `stat()`, except that the file to be stat-ed is
|
||||
* specified by the file descriptor `fd`.
|
||||
* @param fd
|
||||
* @param callback
|
||||
*/
|
||||
fstat(fd: number, cb?: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* Synchronous `fstat`.
|
||||
* `fstat()` is identical to `stat()`, except that the file to be stat-ed is
|
||||
* specified by the file descriptor `fd`.
|
||||
* @param fd
|
||||
* @return [BrowserFS.node.fs.Stats]
|
||||
*/
|
||||
fstatSync(fd: number): Stats;
|
||||
/**
|
||||
* Asynchronous close.
|
||||
* @param fd
|
||||
* @param callback
|
||||
*/
|
||||
close(fd: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous close.
|
||||
* @param fd
|
||||
*/
|
||||
closeSync(fd: number): void;
|
||||
/**
|
||||
* Asynchronous ftruncate.
|
||||
* @param fd
|
||||
* @param len
|
||||
* @param callback
|
||||
*/
|
||||
ftruncate(fd: number, cb?: BFSOneArgCallback): void;
|
||||
ftruncate(fd: number, len?: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous ftruncate.
|
||||
* @param fd
|
||||
* @param len
|
||||
*/
|
||||
ftruncateSync(fd: number, len?: number): void;
|
||||
/**
|
||||
* Asynchronous fsync.
|
||||
* @param fd
|
||||
* @param callback
|
||||
*/
|
||||
fsync(fd: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous fsync.
|
||||
* @param fd
|
||||
*/
|
||||
fsyncSync(fd: number): void;
|
||||
/**
|
||||
* Asynchronous fdatasync.
|
||||
* @param fd
|
||||
* @param callback
|
||||
*/
|
||||
fdatasync(fd: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous fdatasync.
|
||||
* @param fd
|
||||
*/
|
||||
fdatasyncSync(fd: number): void;
|
||||
/**
|
||||
* Write buffer to the file specified by `fd`.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param fd
|
||||
* @param buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param offset Offset in the buffer to start reading data from.
|
||||
* @param length The amount of bytes to write to the file.
|
||||
* @param position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param callback The number specifies the number of bytes written into the file.
|
||||
*/
|
||||
write(fd: number, buffer: Buffer, offset: number, length: number, cb?: BFSThreeArgCallback<number, Buffer>): void;
|
||||
write(fd: number, buffer: Buffer, offset: number, length: number, position: number | null, cb?: BFSThreeArgCallback<number, Buffer>): void;
|
||||
write(fd: number, data: any, cb?: BFSThreeArgCallback<number, string>): void;
|
||||
write(fd: number, data: any, position: number | null, cb?: BFSThreeArgCallback<number, string>): void;
|
||||
write(fd: number, data: any, position: number | null, encoding: string, cb?: BFSThreeArgCallback<number, string>): void;
|
||||
/**
|
||||
* Write buffer to the file specified by `fd`.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for it to return.
|
||||
* @param fd
|
||||
* @param buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param offset Offset in the buffer to start reading data from.
|
||||
* @param length The amount of bytes to write to the file.
|
||||
* @param position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
*/
|
||||
writeSync(fd: number, buffer: Buffer, offset: number, length: number, position?: number | null): number;
|
||||
writeSync(fd: number, data: string, position?: number | null, encoding?: string): number;
|
||||
/**
|
||||
* Read data from the file specified by `fd`.
|
||||
* @param buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param length An integer specifying the number of bytes to read.
|
||||
* @param position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param callback The number is the number of bytes read
|
||||
*/
|
||||
read(fd: number, length: number, position: number | null, encoding: string, cb?: BFSThreeArgCallback<string, number>): void;
|
||||
read(fd: number, buffer: Buffer, offset: number, length: number, position: number | null, cb?: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* Read data from the file specified by `fd`.
|
||||
* @param fd
|
||||
* @param buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param length An integer specifying the number of bytes to read.
|
||||
* @param position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @return [Number]
|
||||
*/
|
||||
readSync(fd: number, length: number, position: number, encoding: string): string;
|
||||
readSync(fd: number, buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* Asynchronous `fchown`.
|
||||
* @param fd
|
||||
* @param uid
|
||||
* @param gid
|
||||
* @param callback
|
||||
*/
|
||||
fchown(fd: number, uid: number, gid: number, callback?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `fchown`.
|
||||
* @param fd
|
||||
* @param uid
|
||||
* @param gid
|
||||
*/
|
||||
fchownSync(fd: number, uid: number, gid: number): void;
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param fd
|
||||
* @param mode
|
||||
* @param callback
|
||||
*/
|
||||
fchmod(fd: number, mode: string | number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `fchmod`.
|
||||
* @param fd
|
||||
* @param mode
|
||||
*/
|
||||
fchmodSync(fd: number, mode: number | string): void;
|
||||
/**
|
||||
* Change the file timestamps of a file referenced by the supplied file
|
||||
* descriptor.
|
||||
* @param fd
|
||||
* @param atime
|
||||
* @param mtime
|
||||
* @param callback
|
||||
*/
|
||||
futimes(fd: number, atime: number | Date, mtime: number | Date, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Change the file timestamps of a file referenced by the supplied file
|
||||
* descriptor.
|
||||
* @param fd
|
||||
* @param atime
|
||||
* @param mtime
|
||||
*/
|
||||
futimesSync(fd: number, atime: number | Date, mtime: number | Date): void;
|
||||
/**
|
||||
* Asynchronous `rmdir`.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
rmdir(path: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `rmdir`.
|
||||
* @param path
|
||||
*/
|
||||
rmdirSync(path: string): void;
|
||||
/**
|
||||
* Asynchronous `mkdir`.
|
||||
* @param path
|
||||
* @param mode defaults to `0777`
|
||||
* @param callback
|
||||
*/
|
||||
mkdir(path: string, mode?: any, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `mkdir`.
|
||||
* @param path
|
||||
* @param mode defaults to `0777`
|
||||
*/
|
||||
mkdirSync(path: string, mode?: number | string): void;
|
||||
/**
|
||||
* Asynchronous `readdir`. Reads the contents of a directory.
|
||||
* The callback gets two arguments `(err, files)` where `files` is an array of
|
||||
* the names of the files in the directory excluding `'.'` and `'..'`.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
readdir(path: string, cb?: BFSCallback<string[]>): void;
|
||||
/**
|
||||
* Synchronous `readdir`. Reads the contents of a directory.
|
||||
* @param path
|
||||
* @return [String[]]
|
||||
*/
|
||||
readdirSync(path: string): string[];
|
||||
/**
|
||||
* Asynchronous `link`.
|
||||
* @param srcpath
|
||||
* @param dstpath
|
||||
* @param callback
|
||||
*/
|
||||
link(srcpath: string, dstpath: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `link`.
|
||||
* @param srcpath
|
||||
* @param dstpath
|
||||
*/
|
||||
linkSync(srcpath: string, dstpath: string): void;
|
||||
/**
|
||||
* Asynchronous `symlink`.
|
||||
* @param srcpath
|
||||
* @param dstpath
|
||||
* @param type can be either `'dir'` or `'file'` (default is `'file'`)
|
||||
* @param callback
|
||||
*/
|
||||
symlink(srcpath: string, dstpath: string, cb?: BFSOneArgCallback): void;
|
||||
symlink(srcpath: string, dstpath: string, type?: string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `symlink`.
|
||||
* @param srcpath
|
||||
* @param dstpath
|
||||
* @param type can be either `'dir'` or `'file'` (default is `'file'`)
|
||||
*/
|
||||
symlinkSync(srcpath: string, dstpath: string, type?: string): void;
|
||||
/**
|
||||
* Asynchronous readlink.
|
||||
* @param path
|
||||
* @param callback
|
||||
*/
|
||||
readlink(path: string, cb?: BFSCallback<string>): void;
|
||||
/**
|
||||
* Synchronous readlink.
|
||||
* @param path
|
||||
* @return [String]
|
||||
*/
|
||||
readlinkSync(path: string): string;
|
||||
/**
|
||||
* Asynchronous `chown`.
|
||||
* @param path
|
||||
* @param uid
|
||||
* @param gid
|
||||
* @param callback
|
||||
*/
|
||||
chown(path: string, uid: number, gid: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `chown`.
|
||||
* @param path
|
||||
* @param uid
|
||||
* @param gid
|
||||
*/
|
||||
chownSync(path: string, uid: number, gid: number): void;
|
||||
/**
|
||||
* Asynchronous `lchown`.
|
||||
* @param path
|
||||
* @param uid
|
||||
* @param gid
|
||||
* @param callback
|
||||
*/
|
||||
lchown(path: string, uid: number, gid: number, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `lchown`.
|
||||
* @param path
|
||||
* @param uid
|
||||
* @param gid
|
||||
*/
|
||||
lchownSync(path: string, uid: number, gid: number): void;
|
||||
/**
|
||||
* Asynchronous `chmod`.
|
||||
* @param path
|
||||
* @param mode
|
||||
* @param callback
|
||||
*/
|
||||
chmod(path: string, mode: number | string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `chmod`.
|
||||
* @param path
|
||||
* @param mode
|
||||
*/
|
||||
chmodSync(path: string, mode: string | number): void;
|
||||
/**
|
||||
* Asynchronous `lchmod`.
|
||||
* @param path
|
||||
* @param mode
|
||||
* @param callback
|
||||
*/
|
||||
lchmod(path: string, mode: number | string, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous `lchmod`.
|
||||
* @param path
|
||||
* @param mode
|
||||
*/
|
||||
lchmodSync(path: string, mode: number | string): void;
|
||||
/**
|
||||
* Change file timestamps of the file referenced by the supplied path.
|
||||
* @param path
|
||||
* @param atime
|
||||
* @param mtime
|
||||
* @param callback
|
||||
*/
|
||||
utimes(path: string, atime: number | Date, mtime: number | Date, cb?: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Change file timestamps of the file referenced by the supplied path.
|
||||
* @param path
|
||||
* @param atime
|
||||
* @param mtime
|
||||
*/
|
||||
utimesSync(path: string, atime: number | Date, mtime: number | Date): void;
|
||||
/**
|
||||
* Asynchronous `realpath`. The callback gets two arguments
|
||||
* `(err, resolvedPath)`. May use `process.cwd` to resolve relative paths.
|
||||
*
|
||||
* @example Usage example
|
||||
* let cache = {'/etc':'/private/etc'};
|
||||
* fs.realpath('/etc/passwd', cache, function (err, resolvedPath) {
|
||||
* if (err) throw err;
|
||||
* console.log(resolvedPath);
|
||||
* });
|
||||
*
|
||||
* @param path
|
||||
* @param cache An object literal of mapped paths that can be used to
|
||||
* force a specific path resolution or avoid additional `fs.stat` calls for
|
||||
* known real paths.
|
||||
* @param callback
|
||||
*/
|
||||
realpath(path: string, cb?: BFSCallback<string>): void;
|
||||
realpath(path: string, cache: {
|
||||
[path: string]: string;
|
||||
}, cb: BFSCallback<string>): void;
|
||||
/**
|
||||
* Synchronous `realpath`.
|
||||
* @param path
|
||||
* @param cache An object literal of mapped paths that can be used to
|
||||
* force a specific path resolution or avoid additional `fs.stat` calls for
|
||||
* known real paths.
|
||||
* @return [String]
|
||||
*/
|
||||
realpathSync(path: string, cache?: {
|
||||
[path: string]: string;
|
||||
}): string;
|
||||
watchFile(filename: string, listener: (curr: Stats, prev: Stats) => void): void;
|
||||
watchFile(filename: string, options: {
|
||||
persistent?: boolean;
|
||||
interval?: number;
|
||||
}, listener: (curr: Stats, prev: Stats) => void): void;
|
||||
unwatchFile(filename: string, listener?: (curr: Stats, prev: Stats) => void): void;
|
||||
watch(filename: string, listener?: (event: string, filename: string) => any): _fs.FSWatcher;
|
||||
watch(filename: string, options: {
|
||||
persistent?: boolean;
|
||||
}, listener?: (event: string, filename: string) => any): _fs.FSWatcher;
|
||||
access(path: string, callback: (err: ApiError) => void): void;
|
||||
access(path: string, mode: number, callback: (err: ApiError) => void): void;
|
||||
accessSync(path: string, mode?: number): void;
|
||||
createReadStream(path: string, options?: {
|
||||
flags?: string;
|
||||
encoding?: string;
|
||||
fd?: number;
|
||||
mode?: number;
|
||||
autoClose?: boolean;
|
||||
}): _fs.ReadStream;
|
||||
createWriteStream(path: string, options?: {
|
||||
flags?: string;
|
||||
encoding?: string;
|
||||
fd?: number;
|
||||
mode?: number;
|
||||
}): _fs.WriteStream;
|
||||
/**
|
||||
* For unit testing. Passes all incoming callbacks to cbWrapper for wrapping.
|
||||
*/
|
||||
wrapCallbacks(cbWrapper: (cb: Function, args: number) => Function): void;
|
||||
private getFdForFile;
|
||||
private fd2file;
|
||||
private closeFd;
|
||||
}
|
||||
export interface FSModule extends FS {
|
||||
/**
|
||||
* The FS constructor.
|
||||
*/
|
||||
FS: typeof FS;
|
||||
/**
|
||||
* The FS.Stats constructor.
|
||||
*/
|
||||
Stats: typeof Stats;
|
||||
/**
|
||||
* Retrieve the FS object backing the fs module.
|
||||
*/
|
||||
getFSModule(): FS;
|
||||
/**
|
||||
* Set the FS object backing the fs module.
|
||||
*/
|
||||
changeFSModule(newFs: FS): void;
|
||||
/**
|
||||
* Accessors
|
||||
*/
|
||||
F_OK: number;
|
||||
R_OK: number;
|
||||
W_OK: number;
|
||||
X_OK: number;
|
||||
}
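
Since the overload lists above determine whether callbacks receive a `Buffer` or a decoded string, a short usage sketch may help. It assumes the bundle is loaded as the global `BrowserFS` and uses an `InMemory` root:

```javascript
BrowserFS.configure({ fs: 'InMemory' }, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire('fs');

  fs.writeFile('/hello.txt', 'Hello BrowserFS', function (err) {
    if (err) { throw err; }

    // No encoding given: the callback receives the raw Buffer.
    fs.readFile('/hello.txt', function (err, buf) {
      if (err) { throw err; }
      console.log(buf.length); // 15 bytes
    });

    // Encoding given: the callback receives a decoded string.
    fs.readFile('/hello.txt', 'utf8', function (err, text) {
      if (err) { throw err; }
      console.log(text); // "Hello BrowserFS"
    });
  });
});
```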
|
||||
1320
sandpack-generated/static/browserfs11/node/core/FS.js
Normal file
File diff suppressed because it is too large
78
sandpack-generated/static/browserfs11/node/core/api_error.d.ts
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
/// <reference types="node" />
|
||||
/**
|
||||
* Standard libc error codes. Add more to this enum and ErrorStrings as they are
|
||||
* needed.
|
||||
* @url http://www.gnu.org/software/libc/manual/html_node/Error-Codes.html
|
||||
*/
|
||||
export declare enum ErrorCode {
|
||||
EPERM = 1,
|
||||
ENOENT = 2,
|
||||
EIO = 5,
|
||||
EBADF = 9,
|
||||
EACCES = 13,
|
||||
EBUSY = 16,
|
||||
EEXIST = 17,
|
||||
ENOTDIR = 20,
|
||||
EISDIR = 21,
|
||||
EINVAL = 22,
|
||||
EFBIG = 27,
|
||||
ENOSPC = 28,
|
||||
EROFS = 30,
|
||||
ENOTEMPTY = 39,
|
||||
ENOTSUP = 95
|
||||
}
|
||||
/**
|
||||
* Strings associated with each error code.
|
||||
* @hidden
|
||||
*/
|
||||
export declare const ErrorStrings: {
|
||||
[code: string]: string;
|
||||
[code: number]: string;
|
||||
};
|
||||
/**
|
||||
* Represents a BrowserFS error. Passed back to applications after a failed
|
||||
* call to the BrowserFS API.
|
||||
*/
|
||||
export declare class ApiError extends Error implements NodeJS.ErrnoException {
|
||||
static fromJSON(json: any): ApiError;
|
||||
/**
|
||||
* Creates an ApiError object from a buffer.
|
||||
*/
|
||||
static fromBuffer(buffer: Buffer, i?: number): ApiError;
|
||||
static FileError(code: ErrorCode, p: string): ApiError;
|
||||
static ENOENT(path: string): ApiError;
|
||||
static EEXIST(path: string): ApiError;
|
||||
static EISDIR(path: string): ApiError;
|
||||
static ENOTDIR(path: string): ApiError;
|
||||
static EPERM(path: string): ApiError;
|
||||
static ENOTEMPTY(path: string): ApiError;
|
||||
errno: ErrorCode;
|
||||
code: string;
|
||||
path: string | undefined;
|
||||
syscall: string;
|
||||
stack: string | undefined;
|
||||
/**
|
||||
* Represents a BrowserFS error. Passed back to applications after a failed
|
||||
* call to the BrowserFS API.
|
||||
*
|
||||
* Error codes mirror those returned by regular Unix file operations, which is
|
||||
* what Node returns.
|
||||
* @constructor ApiError
|
||||
* @param type The type of the error.
|
||||
* @param [message] A descriptive error message.
|
||||
*/
|
||||
constructor(type: ErrorCode, message?: string, path?: string);
|
||||
/**
|
||||
* @return A friendly error message.
|
||||
*/
|
||||
toString(): string;
|
||||
toJSON(): any;
|
||||
/**
|
||||
* Writes the API error into a buffer.
|
||||
*/
|
||||
writeToBuffer(buffer?: Buffer, i?: number): Buffer;
|
||||
/**
|
||||
* The size of the API error in buffer-form in bytes.
|
||||
*/
|
||||
bufferSize(): number;
|
||||
}
|
||||
164
sandpack-generated/static/browserfs11/node/core/api_error.js
Normal file
@@ -0,0 +1,164 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ApiError = exports.ErrorStrings = exports.ErrorCode = void 0;
|
||||
/**
|
||||
* Standard libc error codes. Add more to this enum and ErrorStrings as they are
|
||||
* needed.
|
||||
* @url http://www.gnu.org/software/libc/manual/html_node/Error-Codes.html
|
||||
*/
|
||||
var ErrorCode;
|
||||
(function (ErrorCode) {
|
||||
ErrorCode[ErrorCode["EPERM"] = 1] = "EPERM";
|
||||
ErrorCode[ErrorCode["ENOENT"] = 2] = "ENOENT";
|
||||
ErrorCode[ErrorCode["EIO"] = 5] = "EIO";
|
||||
ErrorCode[ErrorCode["EBADF"] = 9] = "EBADF";
|
||||
ErrorCode[ErrorCode["EACCES"] = 13] = "EACCES";
|
||||
ErrorCode[ErrorCode["EBUSY"] = 16] = "EBUSY";
|
||||
ErrorCode[ErrorCode["EEXIST"] = 17] = "EEXIST";
|
||||
ErrorCode[ErrorCode["ENOTDIR"] = 20] = "ENOTDIR";
|
||||
ErrorCode[ErrorCode["EISDIR"] = 21] = "EISDIR";
|
||||
ErrorCode[ErrorCode["EINVAL"] = 22] = "EINVAL";
|
||||
ErrorCode[ErrorCode["EFBIG"] = 27] = "EFBIG";
|
||||
ErrorCode[ErrorCode["ENOSPC"] = 28] = "ENOSPC";
|
||||
ErrorCode[ErrorCode["EROFS"] = 30] = "EROFS";
|
||||
ErrorCode[ErrorCode["ENOTEMPTY"] = 39] = "ENOTEMPTY";
|
||||
ErrorCode[ErrorCode["ENOTSUP"] = 95] = "ENOTSUP";
|
||||
})(ErrorCode || (exports.ErrorCode = ErrorCode = {}));
|
||||
/* tslint:disable:variable-name */
|
||||
/**
|
||||
* Strings associated with each error code.
|
||||
* @hidden
|
||||
*/
|
||||
exports.ErrorStrings = {};
|
||||
exports.ErrorStrings[ErrorCode.EPERM] = 'Operation not permitted.';
|
||||
exports.ErrorStrings[ErrorCode.ENOENT] = 'No such file or directory.';
|
||||
exports.ErrorStrings[ErrorCode.EIO] = 'Input/output error.';
|
||||
exports.ErrorStrings[ErrorCode.EBADF] = 'Bad file descriptor.';
|
||||
exports.ErrorStrings[ErrorCode.EACCES] = 'Permission denied.';
|
||||
exports.ErrorStrings[ErrorCode.EBUSY] = 'Resource busy or locked.';
|
||||
exports.ErrorStrings[ErrorCode.EEXIST] = 'File exists.';
|
||||
exports.ErrorStrings[ErrorCode.ENOTDIR] = 'File is not a directory.';
|
||||
exports.ErrorStrings[ErrorCode.EISDIR] = 'File is a directory.';
|
||||
exports.ErrorStrings[ErrorCode.EINVAL] = 'Invalid argument.';
|
||||
exports.ErrorStrings[ErrorCode.EFBIG] = 'File is too big.';
|
||||
exports.ErrorStrings[ErrorCode.ENOSPC] = 'No space left on disk.';
|
||||
exports.ErrorStrings[ErrorCode.EROFS] = 'Cannot modify a read-only file system.';
|
||||
exports.ErrorStrings[ErrorCode.ENOTEMPTY] = 'Directory is not empty.';
|
||||
exports.ErrorStrings[ErrorCode.ENOTSUP] = 'Operation is not supported.';
|
||||
/* tslint:enable:variable-name */
|
||||
/**
|
||||
* Represents a BrowserFS error. Passed back to applications after a failed
|
||||
* call to the BrowserFS API.
|
||||
*/
|
||||
var ApiError = /** @class */ (function (_super) {
|
||||
__extends(ApiError, _super);
|
||||
/**
|
||||
* Represents a BrowserFS error. Passed back to applications after a failed
|
||||
* call to the BrowserFS API.
|
||||
*
|
||||
* Error codes mirror those returned by regular Unix file operations, which is
|
||||
* what Node returns.
|
||||
* @constructor ApiError
|
||||
* @param type The type of the error.
|
||||
* @param [message] A descriptive error message.
|
||||
*/
|
||||
function ApiError(type, message, path) {
|
||||
if (message === void 0) { message = exports.ErrorStrings[type]; }
|
||||
var _this = _super.call(this, message) || this;
|
||||
// Unsupported.
|
||||
_this.syscall = "";
|
||||
_this.errno = type;
|
||||
_this.code = ErrorCode[type];
|
||||
_this.path = path;
|
||||
_this.stack = new Error().stack;
|
||||
_this.message = "Error: ".concat(_this.code, ": ").concat(message).concat(_this.path ? ", '".concat(_this.path, "'") : '');
|
||||
return _this;
|
||||
}
|
||||
ApiError.fromJSON = function (json) {
|
||||
var err = new ApiError(0);
|
||||
err.errno = json.errno;
|
||||
err.code = json.code;
|
||||
err.path = json.path;
|
||||
err.stack = json.stack;
|
||||
err.message = json.message;
|
||||
return err;
|
||||
};
|
||||
/**
|
||||
* Creates an ApiError object from a buffer.
|
||||
*/
|
||||
ApiError.fromBuffer = function (buffer, i) {
|
||||
if (i === void 0) { i = 0; }
|
||||
return ApiError.fromJSON(JSON.parse(buffer.toString('utf8', i + 4, i + 4 + buffer.readUInt32LE(i))));
|
||||
};
|
||||
ApiError.FileError = function (code, p) {
|
||||
return new ApiError(code, exports.ErrorStrings[code], p);
|
||||
};
|
||||
ApiError.ENOENT = function (path) {
|
||||
return this.FileError(ErrorCode.ENOENT, path);
|
||||
};
|
||||
ApiError.EEXIST = function (path) {
|
||||
return this.FileError(ErrorCode.EEXIST, path);
|
||||
};
|
||||
ApiError.EISDIR = function (path) {
|
||||
return this.FileError(ErrorCode.EISDIR, path);
|
||||
};
|
||||
ApiError.ENOTDIR = function (path) {
|
||||
return this.FileError(ErrorCode.ENOTDIR, path);
|
||||
};
|
||||
ApiError.EPERM = function (path) {
|
||||
return this.FileError(ErrorCode.EPERM, path);
|
||||
};
|
||||
ApiError.ENOTEMPTY = function (path) {
|
||||
return this.FileError(ErrorCode.ENOTEMPTY, path);
|
||||
};
|
||||
/**
|
||||
* @return A friendly error message.
|
||||
*/
|
||||
ApiError.prototype.toString = function () {
|
||||
return this.message;
|
||||
};
|
||||
ApiError.prototype.toJSON = function () {
|
||||
return {
|
||||
errno: this.errno,
|
||||
code: this.code,
|
||||
path: this.path,
|
||||
stack: this.stack,
|
||||
message: this.message
|
||||
};
|
||||
};
|
||||
/**
|
||||
* Writes the API error into a buffer.
|
||||
*/
|
||||
ApiError.prototype.writeToBuffer = function (buffer, i) {
|
||||
if (buffer === void 0) { buffer = Buffer.alloc(this.bufferSize()); }
|
||||
if (i === void 0) { i = 0; }
|
||||
var bytesWritten = buffer.write(JSON.stringify(this.toJSON()), i + 4);
|
||||
buffer.writeUInt32LE(bytesWritten, i);
|
||||
return buffer;
|
||||
};
|
||||
/**
|
||||
* The size of the API error in buffer-form in bytes.
|
||||
*/
|
||||
ApiError.prototype.bufferSize = function () {
|
||||
// 4 bytes for string length.
|
||||
return 4 + Buffer.byteLength(JSON.stringify(this.toJSON()));
|
||||
};
|
||||
return ApiError;
|
||||
}(Error));
|
||||
exports.ApiError = ApiError;
|
||||
//# sourceMappingURL=api_error.js.map
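
The length-prefixed serialization above (`writeToBuffer` writes a 4-byte length followed by the JSON form that `fromBuffer` parses back) can be exercised directly. A sketch, assuming the global `BrowserFS` bundle, which re-exports this module as `BrowserFS.Errors`:

```javascript
var ApiError = BrowserFS.Errors.ApiError;
var ErrorCode = BrowserFS.Errors.ErrorCode;

// Message defaults to ErrorStrings[ErrorCode.ENOENT]; the path is appended.
var err = new ApiError(ErrorCode.ENOENT, undefined, '/missing.txt');
console.log(err.code);       // "ENOENT"
console.log(err.toString()); // "Error: ENOENT: No such file or directory., '/missing.txt'"

// Round-trip through the 4-byte-length-prefixed JSON form.
var buf = err.writeToBuffer();           // allocates bufferSize() bytes by default
var copy = ApiError.fromBuffer(buf, 0);  // reads the length, then parses the JSON payload
console.log(copy.errno === ErrorCode.ENOENT); // true
console.log(copy.path);                       // "/missing.txt"
```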
|
||||
41
sandpack-generated/static/browserfs11/node/core/backends.d.ts
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
import AsyncMirror from '../backend/AsyncMirror';
|
||||
import BundledHTTPRequest from '../backend/BundledHTTPRequest';
|
||||
import CodeSandboxEditorFS from '../backend/CodeSandboxEditorFS';
|
||||
import CodeSandboxFS from '../backend/CodeSandboxFS';
|
||||
import DynamicHTTPRequest from '../backend/DynamicHTTPRequest';
|
||||
import FolderAdapter from '../backend/FolderAdapter';
|
||||
import HTTPRequest from '../backend/HTTPRequest';
|
||||
import IndexedDB from '../backend/IndexedDB';
|
||||
import InMemory from '../backend/InMemory';
|
||||
import LocalStorage from '../backend/LocalStorage';
|
||||
import MountableFileSystem from '../backend/MountableFileSystem';
|
||||
import OverlayFS from '../backend/OverlayFS';
|
||||
import UNPKGRequest from '../backend/UNPKGRequest';
|
||||
import JSDelivrRequest from '../backend/JSDelivrRequest';
|
||||
import WebsocketFS from '../backend/WebsocketFS';
|
||||
import WorkerFS from '../backend/WorkerFS';
|
||||
import ZipFS from '../backend/ZipFS';
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
declare const Backends: {
|
||||
AsyncMirror: typeof AsyncMirror;
|
||||
FolderAdapter: typeof FolderAdapter;
|
||||
InMemory: typeof InMemory;
|
||||
IndexedDB: typeof IndexedDB;
|
||||
OverlayFS: typeof OverlayFS;
|
||||
LocalStorage: typeof LocalStorage;
|
||||
MountableFileSystem: typeof MountableFileSystem;
|
||||
WorkerFS: typeof WorkerFS;
|
||||
BundledHTTPRequest: typeof BundledHTTPRequest;
|
||||
HTTPRequest: typeof HTTPRequest;
|
||||
UNPKGRequest: typeof UNPKGRequest;
|
||||
JSDelivrRequest: typeof JSDelivrRequest;
|
||||
XmlHttpRequest: typeof HTTPRequest;
|
||||
ZipFS: typeof ZipFS;
|
||||
CodeSandboxFS: typeof CodeSandboxFS;
|
||||
CodeSandboxEditorFS: typeof CodeSandboxEditorFS;
|
||||
WebsocketFS: typeof WebsocketFS;
|
||||
DynamicHTTPRequest: typeof DynamicHTTPRequest;
|
||||
};
|
||||
export default Backends;
|
||||
52
sandpack-generated/static/browserfs11/node/core/backends.js
Normal file
@@ -0,0 +1,52 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var AsyncMirror_1 = require("../backend/AsyncMirror");
|
||||
var BundledHTTPRequest_1 = require("../backend/BundledHTTPRequest");
|
||||
var CodeSandboxEditorFS_1 = require("../backend/CodeSandboxEditorFS");
|
||||
// import IsoFS from '../backend/IsoFS';
|
||||
var CodeSandboxFS_1 = require("../backend/CodeSandboxFS");
|
||||
var DynamicHTTPRequest_1 = require("../backend/DynamicHTTPRequest");
|
||||
// import Dropbox from '../backend/Dropbox';
|
||||
// import Emscripten from '../backend/Emscripten';
|
||||
var FolderAdapter_1 = require("../backend/FolderAdapter");
|
||||
var HTTPRequest_1 = require("../backend/HTTPRequest");
|
||||
var IndexedDB_1 = require("../backend/IndexedDB");
|
||||
// import HTML5FS from '../backend/HTML5FS';
|
||||
var InMemory_1 = require("../backend/InMemory");
|
||||
var LocalStorage_1 = require("../backend/LocalStorage");
|
||||
var MountableFileSystem_1 = require("../backend/MountableFileSystem");
|
||||
var OverlayFS_1 = require("../backend/OverlayFS");
|
||||
var UNPKGRequest_1 = require("../backend/UNPKGRequest");
|
||||
var JSDelivrRequest_1 = require("../backend/JSDelivrRequest");
|
||||
var WebsocketFS_1 = require("../backend/WebsocketFS");
|
||||
var WorkerFS_1 = require("../backend/WorkerFS");
|
||||
var ZipFS_1 = require("../backend/ZipFS");
|
||||
var util_1 = require("./util");
|
||||
// Monkey-patch `Create` functions to check options before file system initialization.
|
||||
[AsyncMirror_1.default, InMemory_1.default, IndexedDB_1.default, FolderAdapter_1.default, OverlayFS_1.default, LocalStorage_1.default, MountableFileSystem_1.default, WorkerFS_1.default, BundledHTTPRequest_1.default, HTTPRequest_1.default, UNPKGRequest_1.default, JSDelivrRequest_1.default, ZipFS_1.default, CodeSandboxFS_1.default, CodeSandboxEditorFS_1.default, WebsocketFS_1.default, DynamicHTTPRequest_1.default].forEach(function (fsType) {
|
||||
var create = fsType.Create;
|
||||
fsType.Create = function (opts, cb) {
|
||||
var oneArg = typeof (opts) === 'function';
|
||||
var normalizedCb = oneArg ? opts : cb;
|
||||
var normalizedOpts = oneArg ? {} : opts;
|
||||
function wrappedCb(e) {
|
||||
if (e) {
|
||||
normalizedCb(e);
|
||||
}
|
||||
else {
|
||||
create.call(fsType, normalizedOpts, normalizedCb);
|
||||
}
|
||||
}
|
||||
(0, util_1.checkOptions)(fsType, normalizedOpts, wrappedCb);
|
||||
};
|
||||
});
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var Backends = { AsyncMirror: AsyncMirror_1.default, FolderAdapter: FolderAdapter_1.default, InMemory: InMemory_1.default, IndexedDB: IndexedDB_1.default, OverlayFS: OverlayFS_1.default, LocalStorage: LocalStorage_1.default, MountableFileSystem: MountableFileSystem_1.default, WorkerFS: WorkerFS_1.default, BundledHTTPRequest: BundledHTTPRequest_1.default, HTTPRequest: HTTPRequest_1.default, UNPKGRequest: UNPKGRequest_1.default, JSDelivrRequest: JSDelivrRequest_1.default, XmlHttpRequest: HTTPRequest_1.default, ZipFS: ZipFS_1.default, CodeSandboxFS: CodeSandboxFS_1.default, CodeSandboxEditorFS: CodeSandboxEditorFS_1.default, WebsocketFS: WebsocketFS_1.default, DynamicHTTPRequest: DynamicHTTPRequest_1.default };
|
||||
// Make sure all backends cast to FileSystemConstructor (for type checking)
|
||||
var _ = Backends;
|
||||
// tslint:disable-next-line:no-unused-expression
|
||||
_; // eslint-disable-line no-unused-expressions
|
||||
exports.default = Backends;
|
||||
//# sourceMappingURL=backends.js.map
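
Because of the wrapper above, invalid or missing options are reported through the callback before a backend is constructed. A sketch of calling `Create()` directly, assuming the global `BrowserFS` bundle (the exact error text comes from `checkOptions` and is only indicative here):

```javascript
// InMemory needs no options; the wrapped Create() validates {} and then constructs it.
BrowserFS.FileSystem.InMemory.Create({}, function (e, imfs) {
  if (e) { throw e; }
  BrowserFS.initialize(imfs); // install the instance as the root file system
});

// ZipFS declares `zipData` as a required option, so an empty options object
// should yield an ApiError in the callback rather than a thrown exception.
BrowserFS.FileSystem.ZipFS.Create({}, function (e) {
  console.log(e && e.message); // describes the missing/invalid `zipData` option
});
```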
|
||||
75
sandpack-generated/static/browserfs11/node/core/browserfs.d.ts
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* BrowserFS's main module. This is exposed in the browser via the BrowserFS global.
|
||||
* Due to limitations in typedoc, we document these functions in ./typedoc.ts.
|
||||
*/
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
import * as buffer from 'buffer';
|
||||
import fs from './node_fs';
|
||||
import * as path from 'path';
|
||||
import { FileSystemConstructor, FileSystem, BFSOneArgCallback, BFSCallback } from './file_system';
|
||||
import EmscriptenFS from '../generic/emscripten_fs';
|
||||
import Backends from './backends';
|
||||
import * as BFSUtils from './util';
|
||||
import * as Errors from './api_error';
|
||||
import setImmediate from '../generic/setImmediate';
|
||||
/**
|
||||
* Installs BFSRequire as global `require`, a Node Buffer polyfill as the global `Buffer` variable,
|
||||
* and a Node process polyfill as the global `process` variable.
|
||||
*/
|
||||
export declare function install(obj: any): void;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare function registerFileSystem(name: string, fs: FileSystemConstructor): void;
|
||||
/**
|
||||
 * Polyfill for CommonJS `require()`. For example, you can call `BFSRequire('fs')` to get a 'fs' module polyfill.
|
||||
*/
|
||||
export declare function BFSRequire(module: 'fs'): typeof fs;
|
||||
export declare function BFSRequire(module: 'path'): typeof path;
|
||||
export declare function BFSRequire(module: 'buffer'): typeof buffer;
|
||||
export declare function BFSRequire(module: 'process'): typeof process;
|
||||
export declare function BFSRequire(module: 'bfs_utils'): typeof BFSUtils;
|
||||
export declare function BFSRequire(module: string): any;
|
||||
/**
|
||||
* Initializes BrowserFS with the given root file system.
|
||||
*/
|
||||
export declare function initialize(rootfs: FileSystem): FileSystem;
|
||||
/**
|
||||
* Creates a file system with the given configuration, and initializes BrowserFS with it.
|
||||
* See the FileSystemConfiguration type for more info on the configuration object.
|
||||
*/
|
||||
export declare function configure(config: FileSystemConfiguration, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Specifies a file system backend type and its options.
|
||||
*
|
||||
* Individual options can recursively contain FileSystemConfiguration objects for
|
||||
* option values that require file systems.
|
||||
*
|
||||
* For example, to mirror Dropbox to LocalStorage with AsyncMirror, use the following
|
||||
* object:
|
||||
*
|
||||
* ```javascript
|
||||
* var config = {
|
||||
* fs: "AsyncMirror",
|
||||
* options: {
|
||||
* sync: {fs: "LocalStorage"},
|
||||
* async: {fs: "Dropbox", options: {client: anAuthenticatedDropboxSDKClient }}
|
||||
* }
|
||||
* };
|
||||
* ```
|
||||
*
|
||||
* The option object for each file system corresponds to that file system's option object passed to its `Create()` method.
|
||||
*/
|
||||
export interface FileSystemConfiguration {
|
||||
fs: string;
|
||||
options?: any;
|
||||
}
|
||||
/**
|
||||
* Retrieve a file system with the given configuration.
|
||||
* @param config A FileSystemConfiguration object. See FileSystemConfiguration for details.
|
||||
* @param cb Called when the file system is constructed, or when an error occurs.
|
||||
*/
|
||||
export declare function getFileSystem(config: FileSystemConfiguration, cb: BFSCallback<FileSystem>): void;
|
||||
export { EmscriptenFS, Backends as FileSystem, Errors, setImmediate };
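
As the `FileSystemConfiguration` docs above note, option values can themselves be configurations. A sketch (assuming the global `BrowserFS` bundle) that mounts two nested backends under a `MountableFileSystem` root:

```javascript
// Nested `fs` entries are resolved recursively by getFileSystem() before the
// outer MountableFileSystem is created.
BrowserFS.configure({
  fs: 'MountableFileSystem',
  options: {
    '/tmp':  { fs: 'InMemory' },     // throwaway storage
    '/home': { fs: 'LocalStorage' }  // persists across page loads
  }
}, function (e) {
  if (e) { throw e; }
  var fs = BrowserFS.BFSRequire('fs');
  fs.writeFileSync('/tmp/scratch.txt', 'gone after reload');
  fs.writeFileSync('/home/notes.txt', 'kept in localStorage');
  console.log(fs.readdirSync('/home')); // ["notes.txt"]
});
```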
|
||||
149
sandpack-generated/static/browserfs11/node/core/browserfs.js
Normal file
@@ -0,0 +1,149 @@
|
||||
"use strict";
|
||||
/**
|
||||
* BrowserFS's main module. This is exposed in the browser via the BrowserFS global.
|
||||
* Due to limitations in typedoc, we document these functions in ./typedoc.ts.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.setImmediate = exports.Errors = exports.FileSystem = exports.EmscriptenFS = exports.getFileSystem = exports.configure = exports.initialize = exports.BFSRequire = exports.registerFileSystem = exports.install = void 0;
|
||||
var buffer = require("buffer");
|
||||
var node_fs_1 = require("./node_fs");
|
||||
var path = require("path");
|
||||
var emscripten_fs_1 = require("../generic/emscripten_fs");
|
||||
exports.EmscriptenFS = emscripten_fs_1.default;
|
||||
var backends_1 = require("./backends");
|
||||
exports.FileSystem = backends_1.default;
|
||||
var BFSUtils = require("./util");
|
||||
var Errors = require("./api_error");
|
||||
exports.Errors = Errors;
|
||||
var setImmediate_1 = require("../generic/setImmediate");
|
||||
exports.setImmediate = setImmediate_1.default;
|
||||
if (process['initializeTTYs']) {
|
||||
process['initializeTTYs']();
|
||||
}
|
||||
/**
|
||||
* Installs BFSRequire as global `require`, a Node Buffer polyfill as the global `Buffer` variable,
|
||||
* and a Node process polyfill as the global `process` variable.
|
||||
*/
|
||||
function install(obj) {
|
||||
obj.Buffer = Buffer;
|
||||
obj.process = process;
|
||||
var oldRequire = obj.require ? obj.require : null;
|
||||
// Monkey-patch require for Node-style code.
|
||||
obj.require = function (arg) {
|
||||
var rv = BFSRequire(arg);
|
||||
if (!rv) {
|
||||
return oldRequire.apply(null, Array.prototype.slice.call(arguments, 0));
|
||||
}
|
||||
else {
|
||||
return rv;
|
||||
}
|
||||
};
|
||||
}
|
||||
exports.install = install;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function registerFileSystem(name, fs) {
|
||||
backends_1.default[name] = fs;
|
||||
}
|
||||
exports.registerFileSystem = registerFileSystem;
|
||||
function BFSRequire(module) {
|
||||
switch (module) {
|
||||
case 'fs':
|
||||
return node_fs_1.default;
|
||||
case 'path':
|
||||
return path;
|
||||
case 'buffer':
|
||||
// The 'buffer' module has 'Buffer' as a property.
|
||||
return buffer;
|
||||
case 'process':
|
||||
return process;
|
||||
case 'bfs_utils':
|
||||
return BFSUtils;
|
||||
default:
|
||||
return backends_1.default[module];
|
||||
}
|
||||
}
|
||||
exports.BFSRequire = BFSRequire;
|
||||
/**
|
||||
* Initializes BrowserFS with the given root file system.
|
||||
*/
|
||||
function initialize(rootfs) {
|
||||
return node_fs_1.default.initialize(rootfs);
|
||||
}
|
||||
exports.initialize = initialize;
|
||||
/**
|
||||
* Creates a file system with the given configuration, and initializes BrowserFS with it.
|
||||
* See the FileSystemConfiguration type for more info on the configuration object.
|
||||
*/
|
||||
function configure(config, cb) {
|
||||
getFileSystem(config, function (e, fs) {
|
||||
if (fs) {
|
||||
initialize(fs);
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.configure = configure;
|
||||
/**
|
||||
* Retrieve a file system with the given configuration.
|
||||
* @param config A FileSystemConfiguration object. See FileSystemConfiguration for details.
|
||||
* @param cb Called when the file system is constructed, or when an error occurs.
|
||||
*/
|
||||
function getFileSystem(config, cb) {
|
||||
var fsName = config['fs'];
|
||||
if (!fsName) {
|
||||
return cb(new Errors.ApiError(Errors.ErrorCode.EPERM, 'Missing "fs" property on configuration object.'));
|
||||
}
|
||||
var options = config['options'];
|
||||
var waitCount = 0;
|
||||
var called = false;
|
||||
function finish() {
|
||||
if (!called) {
|
||||
called = true;
|
||||
var fsc = backends_1.default[fsName];
|
||||
if (!fsc) {
|
||||
cb(new Errors.ApiError(Errors.ErrorCode.EPERM, "File system ".concat(fsName, " is not available in BrowserFS.")));
|
||||
}
|
||||
else {
|
||||
fsc.Create(options, cb);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (options !== null && typeof (options) === "object") {
|
||||
var finishedIterating_1 = false;
|
||||
var props = Object.keys(options).filter(function (k) { return k !== 'fs'; });
|
||||
// Check recursively if other fields have 'fs' properties.
|
||||
props.forEach(function (p) {
|
||||
var d = options[p];
|
||||
if (d !== null && typeof (d) === "object" && d['fs']) {
|
||||
waitCount++;
|
||||
getFileSystem(d, function (e, fs) {
|
||||
waitCount--;
|
||||
if (e) {
|
||||
if (called) {
|
||||
return;
|
||||
}
|
||||
called = true;
|
||||
cb(e);
|
||||
}
|
||||
else {
|
||||
options[p] = fs;
|
||||
if (waitCount === 0 && finishedIterating_1) {
|
||||
finish();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
finishedIterating_1 = true;
|
||||
}
|
||||
if (waitCount === 0) {
|
||||
finish();
|
||||
}
|
||||
}
|
||||
exports.getFileSystem = getFileSystem;
|
||||
//# sourceMappingURL=browserfs.js.map
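
`configure()` above is just `getFileSystem()` followed by `initialize()`; combined with `install()`, Node-style code can then run against the result largely unchanged. A sketch assuming the bundle is loaded as the global `BrowserFS` in a browser page:

```javascript
// install() adds Buffer, process, and a require() shim to the given object.
BrowserFS.install(window);

BrowserFS.configure({ fs: 'LocalStorage' }, function (e) {
  if (e) { throw e; }
  // The shimmed require() resolves BrowserFS modules first and falls back to
  // any pre-existing require on the page.
  var fs = require('fs');
  var path = require('path');
  fs.writeFileSync(path.join('/', 'settings.json'), '{"theme":"dark"}');
  console.log(fs.readFileSync('/settings.json', 'utf8')); // {"theme":"dark"}
});
```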
|
||||
143
sandpack-generated/static/browserfs11/node/core/file.d.ts
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
/// <reference types="node" />
|
||||
import Stats from './node_fs_stats';
|
||||
import { BFSCallback, BFSOneArgCallback, BFSThreeArgCallback } from './file_system';
|
||||
export interface File {
|
||||
/**
|
||||
* **Core**: Get the current file position.
|
||||
*/
|
||||
getPos(): number | undefined;
|
||||
/**
|
||||
* **Core**: Asynchronous `stat`.
|
||||
*/
|
||||
stat(cb: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* **Core**: Synchronous `stat`.
|
||||
*/
|
||||
statSync(): Stats;
|
||||
/**
|
||||
* **Core**: Asynchronous close.
|
||||
*/
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous close.
|
||||
*/
|
||||
closeSync(): void;
|
||||
/**
|
||||
* **Core**: Asynchronous truncate.
|
||||
*/
|
||||
truncate(len: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous truncate.
|
||||
*/
|
||||
truncateSync(len: number): void;
|
||||
/**
|
||||
* **Core**: Asynchronous sync.
|
||||
*/
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous sync.
|
||||
*/
|
||||
syncSync(): void;
|
||||
/**
|
||||
* **Core**: Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param offset Offset in the buffer to start reading data from.
|
||||
* @param length The amount of bytes to write to the file.
|
||||
* @param position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param cb The number specifies the number of bytes written into the file.
|
||||
*/
|
||||
write(buffer: Buffer, offset: number, length: number, position: number | null, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* **Core**: Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.writeSync multiple times on the same file
|
||||
* without waiting for it to return.
|
||||
* @param buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param offset Offset in the buffer to start reading data from.
|
||||
* @param length The amount of bytes to write to the file.
|
||||
* @param position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
*/
|
||||
writeSync(buffer: Buffer, offset: number, length: number, position: number | null): number;
|
||||
/**
|
||||
* **Core**: Read data from the file.
|
||||
* @param buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param length An integer specifying the number of bytes to read.
|
||||
* @param position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param cb The number is the number of bytes read
|
||||
*/
|
||||
read(buffer: Buffer, offset: number, length: number, position: number | null, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* **Core**: Read data from the file.
|
||||
* @param buffer The buffer that the data will be written to.
|
||||
* @param offset The offset within the buffer where writing will start.
|
||||
* @param length An integer specifying the number of bytes to read.
|
||||
* @param position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
*/
|
||||
readSync(buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* **Supplementary**: Asynchronous `datasync`.
|
||||
*
|
||||
* Default implementation maps to `sync`.
|
||||
*/
|
||||
datasync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Supplementary**: Synchronous `datasync`.
|
||||
*
|
||||
* Default implementation maps to `syncSync`.
|
||||
*/
|
||||
datasyncSync(): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `chown`.
|
||||
*/
|
||||
chown(uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `chown`.
|
||||
*/
|
||||
chownSync(uid: number, gid: number): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `fchmod`.
|
||||
*/
|
||||
chmod(mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `fchmod`.
|
||||
*/
|
||||
chmodSync(mode: number): void;
|
||||
/**
|
||||
* **Optional**: Change the file timestamps of the file.
|
||||
*/
|
||||
utimes(atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Change the file timestamps of the file.
|
||||
*/
|
||||
utimesSync(atime: Date, mtime: Date): void;
|
||||
}
|
||||
/**
|
||||
* Base class that contains shared implementations of functions for the file
|
||||
* object.
|
||||
*/
|
||||
export declare class BaseFile {
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
syncSync(): void;
|
||||
datasync(cb: BFSOneArgCallback): void;
|
||||
datasyncSync(): void;
|
||||
chown(uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
chownSync(uid: number, gid: number): void;
|
||||
chmod(mode: number, cb: BFSOneArgCallback): void;
|
||||
chmodSync(mode: number): void;
|
||||
utimes(atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
utimesSync(atime: Date, mtime: Date): void;
|
||||
}
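
The positioned read/write contract described above is easiest to see through the fd-based `fs` API, which forwards to a concrete `File` implementation. A sketch, assuming a writable root file system has already been configured and the bundle is the global `BrowserFS`:

```javascript
var fs = BrowserFS.BFSRequire('fs');
var Buffer = BrowserFS.BFSRequire('buffer').Buffer;

var fd = fs.openSync('/log.bin', 'w+');            // read/write, truncating
fs.writeSync(fd, Buffer.from('abcdef'), 0, 6, 0);  // write 6 bytes at position 0
fs.writeSync(fd, Buffer.from('XY'), 0, 2, 2);      // overwrite bytes 2..3 at an explicit position

var out = Buffer.alloc(6);
fs.readSync(fd, out, 0, 6, 0);                     // positioned read back from offset 0
console.log(out.toString('utf8'));                 // "abXYef"
fs.closeSync(fd);
```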
|
||||
45
sandpack-generated/static/browserfs11/node/core/file.js
Normal file
@@ -0,0 +1,45 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.BaseFile = void 0;
|
||||
var api_error_1 = require("./api_error");
|
||||
/**
|
||||
* Base class that contains shared implementations of functions for the file
|
||||
* object.
|
||||
*/
|
||||
var BaseFile = /** @class */ (function () {
|
||||
function BaseFile() {
|
||||
}
|
||||
BaseFile.prototype.sync = function (cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFile.prototype.syncSync = function () {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFile.prototype.datasync = function (cb) {
|
||||
this.sync(cb);
|
||||
};
|
||||
BaseFile.prototype.datasyncSync = function () {
|
||||
return this.syncSync();
|
||||
};
|
||||
BaseFile.prototype.chown = function (uid, gid, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFile.prototype.chownSync = function (uid, gid) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFile.prototype.chmod = function (mode, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFile.prototype.chmodSync = function (mode) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFile.prototype.utimes = function (atime, mtime, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFile.prototype.utimesSync = function (atime, mtime) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
return BaseFile;
|
||||
}());
|
||||
exports.BaseFile = BaseFile;
|
||||
//# sourceMappingURL=file.js.map
|
||||
80
sandpack-generated/static/browserfs11/node/core/file_flag.d.ts
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
export declare enum ActionType {
|
||||
NOP = 0,
|
||||
THROW_EXCEPTION = 1,
|
||||
TRUNCATE_FILE = 2,
|
||||
CREATE_FILE = 3
|
||||
}
|
||||
/**
|
||||
* Represents one of the following file flags. A convenience object.
|
||||
*
|
||||
* * `'r'` - Open file for reading. An exception occurs if the file does not exist.
|
||||
* * `'r+'` - Open file for reading and writing. An exception occurs if the file does not exist.
|
||||
* * `'rs'` - Open file for reading in synchronous mode. Instructs the filesystem to not cache writes.
|
||||
* * `'rs+'` - Open file for reading and writing, and opens the file in synchronous mode.
|
||||
* * `'w'` - Open file for writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx'` - Like 'w' but opens the file in exclusive mode.
|
||||
* * `'w+'` - Open file for reading and writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx+'` - Like 'w+' but opens the file in exclusive mode.
|
||||
* * `'a'` - Open file for appending. The file is created if it does not exist.
|
||||
* * `'ax'` - Like 'a' but opens the file in exclusive mode.
|
||||
* * `'a+'` - Open file for reading and appending. The file is created if it does not exist.
|
||||
* * `'ax+'` - Like 'a+' but opens the file in exclusive mode.
|
||||
*
|
||||
* Exclusive mode ensures that the file path is newly created.
|
||||
*/
|
||||
export declare class FileFlag {
|
||||
private static flagCache;
|
||||
private static validFlagStrs;
|
||||
/**
|
||||
* Get an object representing the given file flag.
|
||||
* @param modeStr The string representing the flag
|
||||
* @return The FileFlag object representing the flag
|
||||
* @throw when the flag string is invalid
|
||||
*/
|
||||
static getFileFlag(flagStr: string): FileFlag;
|
||||
private flagStr;
|
||||
/**
|
||||
* This should never be called directly.
|
||||
* @param modeStr The string representing the mode
|
||||
* @throw when the mode string is invalid
|
||||
*/
|
||||
constructor(flagStr: string);
|
||||
/**
|
||||
* Get the underlying flag string for this flag.
|
||||
*/
|
||||
getFlagString(): string;
|
||||
/**
|
||||
* Returns true if the file is readable.
|
||||
*/
|
||||
isReadable(): boolean;
|
||||
/**
|
||||
* Returns true if the file is writeable.
|
||||
*/
|
||||
isWriteable(): boolean;
|
||||
/**
|
||||
* Returns true if the file mode should truncate.
|
||||
*/
|
||||
isTruncating(): boolean;
|
||||
/**
|
||||
* Returns true if the file is appendable.
|
||||
*/
|
||||
isAppendable(): boolean;
|
||||
/**
|
||||
* Returns true if the file is open in synchronous mode.
|
||||
*/
|
||||
isSynchronous(): boolean;
|
||||
/**
|
||||
* Returns true if the file is open in exclusive mode.
|
||||
*/
|
||||
isExclusive(): boolean;
|
||||
/**
|
||||
* Returns one of the static fields on this object that indicates the
|
||||
* appropriate response to the path existing.
|
||||
*/
|
||||
pathExistsAction(): ActionType;
|
||||
/**
|
||||
* Returns one of the static fields on this object that indicates the
|
||||
* appropriate response to the path not existing.
|
||||
*/
|
||||
pathNotExistsAction(): ActionType;
|
||||
}
|
||||
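The flag string determines every predicate and action above. A short usage sketch; the import path is assumed from this listing, everything else comes from the declaration:

```typescript
import { FileFlag, ActionType } from './core/file_flag'; // assumed path

const wx = FileFlag.getFileFlag('wx');
console.log(wx.isWriteable());   // true - 'w'
console.log(wx.isExclusive());   // true - 'x'
console.log(wx.isTruncating());  // true - 'w' truncates existing content
console.log(wx.pathExistsAction() === ActionType.THROW_EXCEPTION);  // true: exclusive mode
console.log(wx.pathNotExistsAction() === ActionType.CREATE_FILE);   // true: create on open

// Invalid strings are rejected up front:
// FileFlag.getFileFlag('q');    // throws ApiError(EINVAL, "Invalid flag: q")
```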
135
sandpack-generated/static/browserfs11/node/core/file_flag.js
Normal file
@@ -0,0 +1,135 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FileFlag = exports.ActionType = void 0;
|
||||
var api_error_1 = require("./api_error");
|
||||
var ActionType;
|
||||
(function (ActionType) {
|
||||
// Indicates that the code should not do anything.
|
||||
ActionType[ActionType["NOP"] = 0] = "NOP";
|
||||
// Indicates that the code should throw an exception.
|
||||
ActionType[ActionType["THROW_EXCEPTION"] = 1] = "THROW_EXCEPTION";
|
||||
// Indicates that the code should truncate the file, but only if it is a file.
|
||||
ActionType[ActionType["TRUNCATE_FILE"] = 2] = "TRUNCATE_FILE";
|
||||
// Indicates that the code should create the file.
|
||||
ActionType[ActionType["CREATE_FILE"] = 3] = "CREATE_FILE";
|
||||
})(ActionType || (exports.ActionType = ActionType = {}));
|
||||
/**
|
||||
* Represents one of the following file flags. A convenience object.
|
||||
*
|
||||
* * `'r'` - Open file for reading. An exception occurs if the file does not exist.
|
||||
* * `'r+'` - Open file for reading and writing. An exception occurs if the file does not exist.
|
||||
* * `'rs'` - Open file for reading in synchronous mode. Instructs the filesystem to not cache writes.
|
||||
* * `'rs+'` - Open file for reading and writing, and opens the file in synchronous mode.
|
||||
* * `'w'` - Open file for writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx'` - Like 'w' but opens the file in exclusive mode.
|
||||
* * `'w+'` - Open file for reading and writing. The file is created (if it does not exist) or truncated (if it exists).
|
||||
* * `'wx+'` - Like 'w+' but opens the file in exclusive mode.
|
||||
* * `'a'` - Open file for appending. The file is created if it does not exist.
|
||||
* * `'ax'` - Like 'a' but opens the file in exclusive mode.
|
||||
* * `'a+'` - Open file for reading and appending. The file is created if it does not exist.
|
||||
* * `'ax+'` - Like 'a+' but opens the file in exclusive mode.
|
||||
*
|
||||
* Exclusive mode ensures that the file path is newly created.
|
||||
*/
|
||||
var FileFlag = /** @class */ (function () {
|
||||
/**
|
||||
* This should never be called directly.
|
||||
* @param modeStr The string representing the mode
|
||||
* @throw when the mode string is invalid
|
||||
*/
|
||||
function FileFlag(flagStr) {
|
||||
this.flagStr = flagStr;
|
||||
if (FileFlag.validFlagStrs.indexOf(flagStr) < 0) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid flag: " + flagStr);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Get an object representing the given file flag.
|
||||
* @param modeStr The string representing the flag
|
||||
* @return The FileFlag object representing the flag
|
||||
* @throw when the flag string is invalid
|
||||
*/
|
||||
FileFlag.getFileFlag = function (flagStr) {
|
||||
// Check cache first.
|
||||
if (FileFlag.flagCache.hasOwnProperty(flagStr)) {
|
||||
return FileFlag.flagCache[flagStr];
|
||||
}
|
||||
return FileFlag.flagCache[flagStr] = new FileFlag(flagStr);
|
||||
};
|
||||
/**
|
||||
* Get the underlying flag string for this flag.
|
||||
*/
|
||||
FileFlag.prototype.getFlagString = function () {
|
||||
return this.flagStr;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file is readable.
|
||||
*/
|
||||
FileFlag.prototype.isReadable = function () {
|
||||
return this.flagStr.indexOf('r') !== -1 || this.flagStr.indexOf('+') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file is writeable.
|
||||
*/
|
||||
FileFlag.prototype.isWriteable = function () {
|
||||
return this.flagStr.indexOf('w') !== -1 || this.flagStr.indexOf('a') !== -1 || this.flagStr.indexOf('+') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file mode should truncate.
|
||||
*/
|
||||
FileFlag.prototype.isTruncating = function () {
|
||||
return this.flagStr.indexOf('w') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file is appendable.
|
||||
*/
|
||||
FileFlag.prototype.isAppendable = function () {
|
||||
return this.flagStr.indexOf('a') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file is open in synchronous mode.
|
||||
*/
|
||||
FileFlag.prototype.isSynchronous = function () {
|
||||
return this.flagStr.indexOf('s') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns true if the file is open in exclusive mode.
|
||||
*/
|
||||
FileFlag.prototype.isExclusive = function () {
|
||||
return this.flagStr.indexOf('x') !== -1;
|
||||
};
|
||||
/**
|
||||
* Returns one of the static fields on this object that indicates the
|
||||
* appropriate response to the path existing.
|
||||
*/
|
||||
FileFlag.prototype.pathExistsAction = function () {
|
||||
if (this.isExclusive()) {
|
||||
return ActionType.THROW_EXCEPTION;
|
||||
}
|
||||
else if (this.isTruncating()) {
|
||||
return ActionType.TRUNCATE_FILE;
|
||||
}
|
||||
else {
|
||||
return ActionType.NOP;
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Returns one of the static fields on this object that indicates the
|
||||
* appropriate response to the path not existing.
|
||||
*/
|
||||
FileFlag.prototype.pathNotExistsAction = function () {
|
||||
if ((this.isWriteable() || this.isAppendable()) && this.flagStr !== 'r+') {
|
||||
return ActionType.CREATE_FILE;
|
||||
}
|
||||
else {
|
||||
return ActionType.THROW_EXCEPTION;
|
||||
}
|
||||
};
|
||||
// Contains cached FileFlag instances.
|
||||
FileFlag.flagCache = {};
|
||||
// Array of valid mode strings.
|
||||
FileFlag.validFlagStrs = ['r', 'r+', 'rs', 'rs+', 'w', 'wx', 'w+', 'wx+', 'a', 'ax', 'a+', 'ax+'];
|
||||
return FileFlag;
|
||||
}());
|
||||
exports.FileFlag = FileFlag;
|
||||
//# sourceMappingURL=file_flag.js.map
|
||||
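Taken together, `pathExistsAction` and `pathNotExistsAction` form the decision table that `open()` consults (see `file_system.js` below). Enumerating it over the valid flag strings makes the behaviour easy to audit; a sketch with an assumed import path:

```typescript
import { FileFlag, ActionType } from './core/file_flag'; // assumed path

const validFlags = ['r', 'r+', 'rs', 'rs+', 'w', 'wx', 'w+', 'wx+', 'a', 'ax', 'a+', 'ax+'];
for (const s of validFlags) {
  const f = FileFlag.getFileFlag(s);
  console.log(
    s.padEnd(4),
    'exists:', ActionType[f.pathExistsAction()].padEnd(15),
    'missing:', ActionType[f.pathNotExistsAction()]
  );
}
// 'r'   exists: NOP             missing: THROW_EXCEPTION
// 'w'   exists: TRUNCATE_FILE   missing: CREATE_FILE
// 'wx'  exists: THROW_EXCEPTION missing: CREATE_FILE
```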
449
sandpack-generated/static/browserfs11/node/core/file_system.d.ts
vendored
Normal file
@@ -0,0 +1,449 @@
|
||||
/// <reference types="node" />
|
||||
import { ApiError } from './api_error';
|
||||
import Stats from './node_fs_stats';
|
||||
import { File } from './file';
|
||||
import { FileFlag } from './file_flag';
|
||||
export type BFSOneArgCallback = (e?: ApiError | null) => any;
|
||||
export type BFSCallback<T> = (e: ApiError | null | undefined, rv?: T) => any;
|
||||
export type BFSThreeArgCallback<T, U> = (e: ApiError | null | undefined, arg1?: T, arg2?: U) => any;
|
||||
/**
|
||||
* Interface for a filesystem. **All** BrowserFS FileSystems should implement
|
||||
* this interface.
|
||||
*
|
||||
* Below, we denote each API method as **Core**, **Supplemental**, or
|
||||
* **Optional**.
|
||||
*
|
||||
* ### Core Methods
|
||||
*
|
||||
* **Core** API methods *need* to be implemented for basic read/write
|
||||
* functionality.
|
||||
*
|
||||
* Note that read-only FileSystems can choose to not implement core methods
|
||||
* that mutate files or metadata. The default implementation will pass a
|
||||
* NOT_SUPPORTED error to the callback.
|
||||
*
|
||||
* ### Supplemental Methods
|
||||
*
|
||||
* **Supplemental** API methods do not need to be implemented by a filesystem.
|
||||
* The default implementation implements all of the supplemental API methods in
|
||||
* terms of the **core** API methods.
|
||||
*
|
||||
* Note that a file system may choose to implement supplemental methods for
|
||||
* efficiency reasons.
|
||||
*
|
||||
* The code for some supplemental methods was adapted directly from NodeJS's
|
||||
* fs.js source code.
|
||||
*
|
||||
* ### Optional Methods
|
||||
*
|
||||
* **Optional** API methods provide functionality that may not be available in
|
||||
* all filesystems. For example, all symlink/hardlink-related API methods fall
|
||||
* under this category.
|
||||
*
|
||||
* The default implementation will pass a NOT_SUPPORTED error to the callback.
|
||||
*
|
||||
* ### Argument Assumptions
|
||||
*
|
||||
* You can assume the following about arguments passed to each API method:
|
||||
*
|
||||
* * **Every path is an absolute path.** Meaning, `.`, `..`, and other items
|
||||
* are resolved into an absolute form.
|
||||
* * **All arguments are present.** Any optional arguments at the Node API level
|
||||
* have been passed in with their default values.
|
||||
* * **The callback will reset the stack depth.** When your filesystem calls the
|
||||
* callback with the requested information, it will use `setImmediate` to
|
||||
* reset the JavaScript stack depth before calling the user-supplied callback.
|
||||
*/
|
||||
export interface FileSystem {
|
||||
/**
|
||||
* **Optional**: Returns the name of the file system.
|
||||
*/
|
||||
getName(): string;
|
||||
/**
|
||||
* **Optional**: Passes the following information to the callback:
|
||||
*
|
||||
* * Total number of bytes available on this file system.
|
||||
* Number of free bytes available on this file system.
|
||||
*
|
||||
* @todo This info is not available through the Node API. Perhaps we could do a
|
||||
* polyfill of diskspace.js, or add a new Node API function.
|
||||
* @param path The path to the location that is being queried. Only
|
||||
* useful for filesystems that support mount points.
|
||||
*/
|
||||
diskSpace(p: string, cb: (total: number, free: number) => any): void;
|
||||
/**
|
||||
* **Core**: Is this filesystem read-only?
|
||||
* @return True if this FileSystem is inherently read-only.
|
||||
*/
|
||||
isReadOnly(): boolean;
|
||||
/**
|
||||
* **Core**: Does the filesystem support optional symlink/hardlink-related
|
||||
* commands?
|
||||
* @return True if the FileSystem supports the optional
|
||||
* symlink/hardlink-related commands.
|
||||
*/
|
||||
supportsLinks(): boolean;
|
||||
/**
|
||||
* **Core**: Does the filesystem support optional property-related commands?
|
||||
* @return True if the FileSystem supports the optional
|
||||
* property-related commands (permissions, utimes, etc).
|
||||
*/
|
||||
supportsProps(): boolean;
|
||||
/**
|
||||
* **Core**: Does the filesystem support the optional synchronous interface?
|
||||
* @return True if the FileSystem supports synchronous operations.
|
||||
*/
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* **Core**: Asynchronous rename. No arguments other than a possible exception
|
||||
* are given to the completion callback.
|
||||
*/
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous rename.
|
||||
*/
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
/**
|
||||
* **Core**: Asynchronous `stat` or `lstat`.
|
||||
* @param isLstat True if this is `lstat`, false if this is regular
|
||||
* `stat`.
|
||||
*/
|
||||
stat(p: string, isLstat: boolean | null, cb: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* **Core**: Synchronous `stat` or `lstat`.
|
||||
* @param isLstat True if this is `lstat`, false if this is regular
|
||||
* `stat`.
|
||||
*/
|
||||
statSync(p: string, isLstat: boolean | null): Stats;
|
||||
/**
|
||||
* **Core**: Asynchronous file open.
|
||||
* @see http://www.manpagez.com/man/2/open/
|
||||
* @param flags Handles the complexity of the various file
|
||||
* modes. See its API for more details.
|
||||
* @param mode Mode to use to open the file. Can be ignored if the
|
||||
* filesystem doesn't support permissions.
|
||||
*/
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
/**
|
||||
* **Core**: Synchronous file open.
|
||||
* @see http://www.manpagez.com/man/2/open/
|
||||
* @param flags Handles the complexity of the various file
|
||||
* modes. See its API for more details.
|
||||
* @param mode Mode to use to open the file. Can be ignored if the
|
||||
* filesystem doesn't support permissions.
|
||||
*/
|
||||
openSync(p: string, flag: FileFlag, mode: number): File;
|
||||
/**
|
||||
* **Core**: Asynchronous `unlink`.
|
||||
*/
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous `unlink`.
|
||||
*/
|
||||
unlinkSync(p: string): void;
|
||||
/**
|
||||
* **Core**: Asynchronous `rmdir`.
|
||||
*/
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous `rmdir`.
|
||||
*/
|
||||
rmdirSync(p: string): void;
|
||||
/**
|
||||
* **Core**: Asynchronous `mkdir`.
|
||||
* @param mode Mode to make the directory using. Can be ignored if
|
||||
* the filesystem doesn't support permissions.
|
||||
*/
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous `mkdir`.
|
||||
* @param mode Mode to make the directory using. Can be ignored if
|
||||
* the filesystem doesn't support permissions.
|
||||
*/
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
/**
|
||||
* **Core**: Asynchronous `readdir`. Reads the contents of a directory.
|
||||
*
|
||||
* The callback gets two arguments `(err, files)` where `files` is an array of
|
||||
* the names of the files in the directory excluding `'.'` and `'..'`.
|
||||
*/
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
/**
|
||||
* **Core**: Synchronous `readdir`. Reads the contents of a directory.
|
||||
*/
|
||||
readdirSync(p: string): string[];
|
||||
/**
|
||||
* **Supplemental**: Test whether or not the given path exists by checking with
|
||||
* the file system. Then call the callback argument with either true or false.
|
||||
*/
|
||||
exists(p: string, cb: (exists: boolean) => void): void;
|
||||
/**
|
||||
* **Supplemental**: Test whether or not the given path exists by checking with
|
||||
* the file system.
|
||||
*/
|
||||
existsSync(p: string): boolean;
|
||||
/**
|
||||
* **Supplemental**: Asynchronous `realpath`. The callback gets two arguments
|
||||
* `(err, resolvedPath)`.
|
||||
*
|
||||
* Note that the Node API will resolve `path` to an absolute path.
|
||||
* @param cache An object literal of mapped paths that can be used to
|
||||
* force a specific path resolution or avoid additional `fs.stat` calls for
|
||||
* known real paths. If not supplied by the user, it'll be an empty object.
|
||||
*/
|
||||
realpath(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}, cb: BFSCallback<string>): void;
|
||||
/**
|
||||
* **Supplemental**: Synchronous `realpath`.
|
||||
*
|
||||
* Note that the Node API will resolve `path` to an absolute path.
|
||||
* @param cache An object literal of mapped paths that can be used to
|
||||
* force a specific path resolution or avoid additional `fs.stat` calls for
|
||||
* known real paths. If not supplied by the user, it'll be an empty object.
|
||||
*/
|
||||
realpathSync(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}): string;
|
||||
/**
|
||||
* **Supplemental**: Asynchronous `truncate`.
|
||||
*/
|
||||
truncate(p: string, len: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Supplemental**: Synchronous `truncate`.
|
||||
*/
|
||||
truncateSync(p: string, len: number): void;
|
||||
/**
|
||||
* **Supplemental**: Asynchronously reads the entire contents of a file.
|
||||
* @param encoding If non-null, the file's contents should be decoded
|
||||
* into a string using that encoding. Otherwise, if encoding is null, fetch
|
||||
* the file's contents as a Buffer.
|
||||
* @param cb If no encoding is specified, then the raw buffer is returned.
|
||||
*/
|
||||
readFile(fname: string, encoding: string | null, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
/**
|
||||
* **Supplemental**: Synchronously reads the entire contents of a file.
|
||||
* @param encoding If non-null, the file's contents should be decoded
|
||||
* into a string using that encoding. Otherwise, if encoding is null, fetch
|
||||
* the file's contents as a Buffer.
|
||||
*/
|
||||
readFileSync(fname: string, encoding: string | null, flag: FileFlag): any;
|
||||
/**
|
||||
* **Supplemental**: Asynchronously writes data to a file, replacing the file
|
||||
* if it already exists.
|
||||
*
|
||||
* The encoding option is ignored if data is a buffer.
|
||||
*/
|
||||
writeFile(fname: string, data: any, encoding: string | null, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Supplemental**: Synchronously writes data to a file, replacing the file
|
||||
* if it already exists.
|
||||
*
|
||||
* The encoding option is ignored if data is a buffer.
|
||||
*/
|
||||
writeFileSync(fname: string, data: string | Buffer, encoding: string | null, flag: FileFlag, mode: number): void;
|
||||
/**
|
||||
* **Supplemental**: Asynchronously append data to a file, creating the file if
|
||||
* it does not yet exist.
|
||||
*/
|
||||
appendFile(fname: string, data: string | Buffer, encoding: string | null, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Supplemental**: Synchronously append data to a file, creating the file if
|
||||
* it does not yet exist.
|
||||
*/
|
||||
appendFileSync(fname: string, data: string | Buffer, encoding: string | null, flag: FileFlag, mode: number): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `chmod` or `lchmod`.
|
||||
* @param isLchmod `True` if `lchmod`, false if `chmod`. Has no
|
||||
* bearing on result if links aren't supported.
|
||||
*/
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `chmod` or `lchmod`.
|
||||
* @param isLchmod `True` if `lchmod`, false if `chmod`. Has no
|
||||
* bearing on result if links aren't supported.
|
||||
*/
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `chown` or `lchown`.
|
||||
* @param isLchown `True` if `lchown`, false if `chown`. Has no
|
||||
* bearing on result if links aren't supported.
|
||||
*/
|
||||
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `chown` or `lchown`.
|
||||
* @param isLchown `True` if `lchown`, false if `chown`. Has no
|
||||
* bearing on result if links aren't supported.
|
||||
*/
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
/**
|
||||
* **Optional**: Change file timestamps of the file referenced by the supplied
|
||||
* path.
|
||||
*/
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Change file timestamps of the file referenced by the supplied
|
||||
* path.
|
||||
*/
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `link`.
|
||||
*/
|
||||
link(srcpath: string, dstpath: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `link`.
|
||||
*/
|
||||
linkSync(srcpath: string, dstpath: string): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous `symlink`.
|
||||
* @param type can be either `'dir'` or `'file'`
|
||||
*/
|
||||
symlink(srcpath: string, dstpath: string, type: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Optional**: Synchronous `symlink`.
|
||||
* @param type can be either `'dir'` or `'file'`
|
||||
*/
|
||||
symlinkSync(srcpath: string, dstpath: string, type: string): void;
|
||||
/**
|
||||
* **Optional**: Asynchronous readlink.
|
||||
*/
|
||||
readlink(p: string, cb: BFSCallback<string>): void;
|
||||
/**
|
||||
* **Optional**: Synchronous readlink.
|
||||
*/
|
||||
readlinkSync(p: string): string;
|
||||
}
|
||||
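A read-only backend, then, only needs the capability queries plus the read-side **Core**/**Supplemental** methods; anything it leaves out falls back to the defaults in `BaseFileSystem` below, which report `NOT_SUPPORTED`. A rough sketch: the `FrozenFS` class and its in-memory store are hypothetical, the import paths mirror this listing, and the `Stats` constructor arguments are an assumption (the constructor is not shown in this file).

```typescript
import { BaseFileSystem, FileSystem, BFSCallback } from './core/file_system'; // assumed paths
import { ApiError } from './core/api_error';
import { FileFlag } from './core/file_flag';
import Stats, { FileType } from './core/node_fs_stats';

// Hypothetical read-only backend over a fixed in-memory map of path -> contents.
class FrozenFS extends BaseFileSystem implements FileSystem {
  constructor(private files: { [p: string]: Buffer }) { super(); }

  public getName(): string { return 'FrozenFS'; }
  public isReadOnly(): boolean { return true; }
  public supportsProps(): boolean { return false; }
  public supportsSynch(): boolean { return false; }

  public stat(p: string, isLstat: boolean | null, cb: BFSCallback<Stats>): void {
    const data = this.files[p];
    if (!data) {
      return cb(ApiError.ENOENT(p));
    }
    // Assumption: Stats can be built from (FileType, size); adjust to the real signature.
    cb(null, new Stats(FileType.FILE, data.length));
  }

  // readFile is Supplemental, but implementing it directly avoids going
  // through open()/read() for a backend this simple.
  public readFile(fname: string, encoding: string | null, flag: FileFlag, cb: BFSCallback<string | Buffer>): void {
    const data = this.files[fname];
    if (!data) {
      return cb(ApiError.ENOENT(fname));
    }
    cb(null, encoding === null ? data : data.toString(encoding as BufferEncoding));
  }
}
```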
/**
|
||||
* Describes a file system option.
|
||||
*/
|
||||
export interface FileSystemOption<T> {
|
||||
type: string | string[];
|
||||
optional?: boolean;
|
||||
description: string;
|
||||
validator?(opt: T, cb: BFSOneArgCallback): void;
|
||||
}
|
||||
/**
|
||||
* Describes all of the options available in a file system.
|
||||
*/
|
||||
export interface FileSystemOptions {
|
||||
[name: string]: FileSystemOption<any>;
|
||||
}
|
||||
/**
|
||||
* Contains typings for static functions on the file system constructor.
|
||||
*/
|
||||
export interface FileSystemConstructor {
|
||||
/**
|
||||
* **Core**: Name to identify this particular file system.
|
||||
*/
|
||||
Name: string;
|
||||
/**
|
||||
* **Core**: Describes all of the options available for this file system.
|
||||
*/
|
||||
Options: FileSystemOptions;
|
||||
/**
|
||||
* **Core**: Creates a file system of this given type with the given
|
||||
* options.
|
||||
*/
|
||||
Create(options: object, cb: BFSCallback<FileSystem>): void;
|
||||
/**
|
||||
* **Core**: Returns 'true' if this filesystem is available in the current
|
||||
* environment. For example, a `localStorage`-backed filesystem will return
|
||||
* 'false' if the browser does not support that API.
|
||||
*
|
||||
* Defaults to 'false', as the FileSystem base class isn't usable alone.
|
||||
*/
|
||||
isAvailable(): boolean;
|
||||
}
|
||||
/**
|
||||
* Basic filesystem class. Most filesystems should extend this class, as it
|
||||
* provides default implementations for a handful of methods.
|
||||
*/
|
||||
export declare class BaseFileSystem {
|
||||
supportsLinks(): boolean;
|
||||
diskSpace(p: string, cb: (total: number, free: number) => any): void;
|
||||
/**
|
||||
* Opens the file at path p with the given flag. The file must exist.
|
||||
* @param p The path to open.
|
||||
* @param flag The flag to use when opening the file.
|
||||
*/
|
||||
openFile(p: string, flag: FileFlag, cb: BFSCallback<File>): void;
|
||||
/**
|
||||
* Create the file at path p with the given mode. Then, open it with the given
|
||||
* flag.
|
||||
*/
|
||||
createFile(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
stat(p: string, isLstat: boolean | null, cb: BFSCallback<Stats>): void;
|
||||
statSync(p: string, isLstat: boolean | null): Stats;
|
||||
/**
|
||||
* Opens the file at path p with the given flag. The file must exist.
|
||||
* @param p The path to open.
|
||||
* @param flag The flag to use when opening the file.
|
||||
* @return A File object corresponding to the opened file.
|
||||
*/
|
||||
openFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
/**
|
||||
* Create the file at path p with the given mode. Then, open it with the given
|
||||
* flag.
|
||||
*/
|
||||
createFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
openSync(p: string, flag: FileFlag, mode: number): File;
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
unlinkSync(p: string): void;
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(p: string): string[];
|
||||
exists(p: string, cb: (exists: boolean) => void): void;
|
||||
existsSync(p: string): boolean;
|
||||
realpath(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}, cb: BFSCallback<string>): void;
|
||||
realpathSync(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}): string;
|
||||
truncate(p: string, len: number, cb: BFSOneArgCallback): void;
|
||||
truncateSync(p: string, len: number): void;
|
||||
readFile(fname: string, encoding: string | null, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
readFileSync(fname: string, encoding: string | null, flag: FileFlag): any;
|
||||
writeFile(fname: string, data: any, encoding: string | null, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
writeFileSync(fname: string, data: any, encoding: string | null, flag: FileFlag, mode: number): void;
|
||||
appendFile(fname: string, data: any, encoding: string | null, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
appendFileSync(fname: string, data: any, encoding: string | null, flag: FileFlag, mode: number): void;
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
link(srcpath: string, dstpath: string, cb: BFSOneArgCallback): void;
|
||||
linkSync(srcpath: string, dstpath: string): void;
|
||||
symlink(srcpath: string, dstpath: string, type: string, cb: BFSOneArgCallback): void;
|
||||
symlinkSync(srcpath: string, dstpath: string, type: string): void;
|
||||
readlink(p: string, cb: BFSOneArgCallback): void;
|
||||
readlinkSync(p: string): string;
|
||||
}
|
||||
/**
|
||||
* Implements the asynchronous API in terms of the synchronous API.
|
||||
* @class SynchronousFileSystem
|
||||
*/
|
||||
export declare class SynchronousFileSystem extends BaseFileSystem {
|
||||
supportsSynch(): boolean;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
stat(p: string, isLstat: boolean | null, cb: BFSCallback<Stats>): void;
|
||||
open(p: string, flags: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
|
||||
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
link(srcpath: string, dstpath: string, cb: BFSOneArgCallback): void;
|
||||
symlink(srcpath: string, dstpath: string, type: string, cb: BFSOneArgCallback): void;
|
||||
readlink(p: string, cb: BFSCallback<string>): void;
|
||||
}
|
||||
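Backends that can answer synchronously usually extend `SynchronousFileSystem` and implement only the `*Sync` variants; the base class derives the callback API, and the static side follows the `FileSystemConstructor` contract above. A sketch, with a hypothetical class name and trivial behaviour, import paths assumed from this listing:

```typescript
import { SynchronousFileSystem, FileSystem, BFSCallback, FileSystemOptions } from './core/file_system'; // assumed path

class TinySyncFS extends SynchronousFileSystem implements FileSystem {
  public static readonly Name = 'TinySyncFS';
  public static readonly Options: FileSystemOptions = {}; // no options in this sketch

  // FileSystemConstructor.Create: validate options, then hand back an instance.
  public static Create(opts: object, cb: BFSCallback<TinySyncFS>): void {
    cb(null, new TinySyncFS());
  }

  public static isAvailable(): boolean {
    return true; // nothing environment-specific required
  }

  public getName(): string { return TinySyncFS.Name; }
  public isReadOnly(): boolean { return true; }
  public supportsProps(): boolean { return false; }

  // Implement the synchronous core; SynchronousFileSystem.readdir() wraps this
  // in try/catch and forwards the result to the callback.
  public readdirSync(p: string): string[] {
    return [];
  }

  public existsSync(p: string): boolean {
    return p === '/';
  }
}
```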
585
sandpack-generated/static/browserfs11/node/core/file_system.js
Normal file
@@ -0,0 +1,585 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SynchronousFileSystem = exports.BaseFileSystem = void 0;
|
||||
var api_error_1 = require("./api_error");
|
||||
var file_flag_1 = require("./file_flag");
|
||||
var path = require("path");
|
||||
var util_1 = require("./util");
|
||||
/**
|
||||
* Basic filesystem class. Most filesystems should extend this class, as it
|
||||
* provides default implementations for a handful of methods.
|
||||
*/
|
||||
var BaseFileSystem = /** @class */ (function () {
|
||||
function BaseFileSystem() {
|
||||
}
|
||||
BaseFileSystem.prototype.supportsLinks = function () {
|
||||
return false;
|
||||
};
|
||||
BaseFileSystem.prototype.diskSpace = function (p, cb) {
|
||||
cb(0, 0);
|
||||
};
|
||||
/**
|
||||
* Opens the file at path p with the given flag. The file must exist.
|
||||
* @param p The path to open.
|
||||
* @param flag The flag to use when opening the file.
|
||||
*/
|
||||
BaseFileSystem.prototype.openFile = function (p, flag, cb) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* Create the file at path p with the given mode. Then, open it with the given
|
||||
* flag.
|
||||
*/
|
||||
BaseFileSystem.prototype.createFile = function (p, flag, mode, cb) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.open = function (p, flag, mode, cb) {
|
||||
var _this = this;
|
||||
var mustBeFile = function (e, stats) {
|
||||
if (e) {
|
||||
// File does not exist.
|
||||
switch (flag.pathNotExistsAction()) {
|
||||
case file_flag_1.ActionType.CREATE_FILE:
|
||||
// Ensure parent exists.
|
||||
return _this.stat(path.dirname(p), false, function (e, parentStats) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else if (parentStats && !parentStats.isDirectory()) {
|
||||
cb(api_error_1.ApiError.ENOTDIR(path.dirname(p)));
|
||||
}
|
||||
else {
|
||||
_this.createFile(p, flag, mode, cb);
|
||||
}
|
||||
});
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
return cb(api_error_1.ApiError.ENOENT(p));
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileFlag object.'));
|
||||
}
|
||||
}
|
||||
else {
|
||||
// File exists.
|
||||
if (stats && stats.isDirectory()) {
|
||||
return cb(api_error_1.ApiError.EISDIR(p));
|
||||
}
|
||||
switch (flag.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
return cb(api_error_1.ApiError.EEXIST(p));
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
// NOTE: In a previous implementation, we deleted the file and
|
||||
// re-created it. However, this created a race condition if another
|
||||
// asynchronous request was trying to read the file, as the file
|
||||
// would not exist for a small period of time.
|
||||
return _this.openFile(p, flag, function (e, fd) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
}
|
||||
else if (fd) {
|
||||
fd.truncate(0, function () {
|
||||
fd.sync(function () {
|
||||
cb(null, fd);
|
||||
});
|
||||
});
|
||||
}
|
||||
else {
|
||||
(0, util_1.fail)();
|
||||
}
|
||||
});
|
||||
case file_flag_1.ActionType.NOP:
|
||||
return _this.openFile(p, flag, cb);
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileFlag object.'));
|
||||
}
|
||||
}
|
||||
};
|
||||
this.stat(p, false, mustBeFile);
|
||||
};
|
||||
BaseFileSystem.prototype.rename = function (oldPath, newPath, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.renameSync = function (oldPath, newPath) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.stat = function (p, isLstat, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.statSync = function (p, isLstat) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* Opens the file at path p with the given flag. The file must exist.
|
||||
* @param p The path to open.
|
||||
* @param flag The flag to use when opening the file.
|
||||
* @return A File object corresponding to the opened file.
|
||||
*/
|
||||
BaseFileSystem.prototype.openFileSync = function (p, flag, mode) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* Create the file at path p with the given mode. Then, open it with the given
|
||||
* flag.
|
||||
*/
|
||||
BaseFileSystem.prototype.createFileSync = function (p, flag, mode) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.openSync = function (p, flag, mode) {
|
||||
// Check if the path exists, and is a file.
|
||||
var stats;
|
||||
try {
|
||||
stats = this.statSync(p, false);
|
||||
}
|
||||
catch (e) {
|
||||
// File does not exist.
|
||||
switch (flag.pathNotExistsAction()) {
|
||||
case file_flag_1.ActionType.CREATE_FILE:
|
||||
// Ensure parent exists.
|
||||
var parentStats = this.statSync(path.dirname(p), false);
|
||||
if (!parentStats.isDirectory()) {
|
||||
throw api_error_1.ApiError.ENOTDIR(path.dirname(p));
|
||||
}
|
||||
return this.createFileSync(p, flag, mode);
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
throw api_error_1.ApiError.ENOENT(p);
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileFlag object.');
|
||||
}
|
||||
}
|
||||
// File exists.
|
||||
if (stats.isDirectory()) {
|
||||
throw api_error_1.ApiError.EISDIR(p);
|
||||
}
|
||||
switch (flag.pathExistsAction()) {
|
||||
case file_flag_1.ActionType.THROW_EXCEPTION:
|
||||
throw api_error_1.ApiError.EEXIST(p);
|
||||
case file_flag_1.ActionType.TRUNCATE_FILE:
|
||||
// Delete file.
|
||||
this.unlinkSync(p);
|
||||
// Create file. Use the same mode as the old file.
|
||||
// Node itself modifies the ctime when this occurs, so this action
|
||||
// will preserve that behavior if the underlying file system
|
||||
// supports those properties.
|
||||
return this.createFileSync(p, flag, stats.mode);
|
||||
case file_flag_1.ActionType.NOP:
|
||||
return this.openFileSync(p, flag, mode);
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, 'Invalid FileFlag object.');
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.unlink = function (p, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.unlinkSync = function (p) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.rmdir = function (p, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.rmdirSync = function (p) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.mkdir = function (p, mode, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.mkdirSync = function (p, mode) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.readdir = function (p, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.readdirSync = function (p) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.exists = function (p, cb) {
|
||||
this.stat(p, null, function (err) {
|
||||
cb(!err);
|
||||
});
|
||||
};
|
||||
BaseFileSystem.prototype.existsSync = function (p) {
|
||||
try {
|
||||
this.statSync(p, true);
|
||||
return true;
|
||||
}
|
||||
catch (e) {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.realpath = function (p, cache, cb) {
|
||||
if (this.supportsLinks()) {
|
||||
// The path could contain symlinks. Split up the path,
|
||||
// resolve any symlinks, return the resolved string.
|
||||
var splitPath = p.split(path.sep);
|
||||
// TODO: Simpler to just pass through file, find sep and such.
|
||||
for (var i = 0; i < splitPath.length; i++) {
|
||||
var addPaths = splitPath.slice(0, i + 1);
|
||||
splitPath[i] = path.join.apply(null, addPaths);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// No symlinks. We just need to verify that it exists.
|
||||
this.exists(p, function (doesExist) {
|
||||
if (doesExist) {
|
||||
cb(null, p);
|
||||
}
|
||||
else {
|
||||
cb(api_error_1.ApiError.ENOENT(p));
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.realpathSync = function (p, cache) {
|
||||
if (this.supportsLinks()) {
|
||||
// The path could contain symlinks. Split up the path,
|
||||
// resolve any symlinks, return the resolved string.
|
||||
var splitPath = p.split(path.sep);
|
||||
// TODO: Simpler to just pass through file, find sep and such.
|
||||
for (var i = 0; i < splitPath.length; i++) {
|
||||
var addPaths = splitPath.slice(0, i + 1);
|
||||
splitPath[i] = path.join.apply(path, addPaths);
|
||||
}
|
||||
return splitPath.join(path.sep);
|
||||
}
|
||||
else {
|
||||
// No symlinks. We just need to verify that it exists.
|
||||
if (this.existsSync(p)) {
|
||||
return p;
|
||||
}
|
||||
else {
|
||||
throw api_error_1.ApiError.ENOENT(p);
|
||||
}
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.truncate = function (p, len, cb) {
|
||||
this.open(p, file_flag_1.FileFlag.getFileFlag('r+'), 0x1a4, (function (er, fd) {
|
||||
if (er) {
|
||||
return cb(er);
|
||||
}
|
||||
fd.truncate(len, (function (er) {
|
||||
fd.close((function (er2) {
|
||||
cb(er || er2);
|
||||
}));
|
||||
}));
|
||||
}));
|
||||
};
|
||||
BaseFileSystem.prototype.truncateSync = function (p, len) {
|
||||
var fd = this.openSync(p, file_flag_1.FileFlag.getFileFlag('r+'), 0x1a4);
|
||||
// Need to safely close FD, regardless of whether or not truncate succeeds.
|
||||
try {
|
||||
fd.truncateSync(len);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err, arg) {
|
||||
fd.close(function (err2) {
|
||||
if (!err) {
|
||||
err = err2;
|
||||
}
|
||||
return oldCb(err, arg);
|
||||
});
|
||||
};
|
||||
fd.stat(function (err, stat) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
// Allocate buffer.
|
||||
var buf = Buffer.alloc(stat.size);
|
||||
fd.read(buf, 0, stat.size, 0, function (err) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
else if (encoding === null) {
|
||||
return cb(err, buf);
|
||||
}
|
||||
try {
|
||||
cb(null, buf.toString(encoding));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
BaseFileSystem.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, 0x1a4);
|
||||
try {
|
||||
var stat = fd.statSync();
|
||||
// Allocate buffer.
|
||||
var buf = Buffer.alloc(stat.size);
|
||||
fd.readSync(buf, 0, stat.size, 0);
|
||||
if (encoding === null) {
|
||||
return buf;
|
||||
}
|
||||
return buf.toString(encoding);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.writeFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
// Get file.
|
||||
this.open(fname, flag, 0x1a4, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err) {
|
||||
fd.close(function (err2) {
|
||||
oldCb(err ? err : err2);
|
||||
});
|
||||
};
|
||||
try {
|
||||
if (typeof data === 'string') {
|
||||
data = Buffer.from(data, encoding);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
return cb(e);
|
||||
}
|
||||
// Write into file.
|
||||
fd.write(data, 0, data.length, 0, cb);
|
||||
});
|
||||
};
|
||||
BaseFileSystem.prototype.writeFileSync = function (fname, data, encoding, flag, mode) {
|
||||
// Get file.
|
||||
var fd = this.openSync(fname, flag, mode);
|
||||
try {
|
||||
if (typeof data === 'string') {
|
||||
data = Buffer.from(data, encoding);
|
||||
}
|
||||
// Write into file.
|
||||
fd.writeSync(data, 0, data.length, 0);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.appendFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
// Wrap cb in file closing code.
|
||||
var oldCb = cb;
|
||||
this.open(fname, flag, mode, function (err, fd) {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
cb = function (err) {
|
||||
fd.close(function (err2) {
|
||||
oldCb(err ? err : err2);
|
||||
});
|
||||
};
|
||||
if (typeof data === 'string') {
|
||||
data = Buffer.from(data, encoding);
|
||||
}
|
||||
fd.write(data, 0, data.length, null, cb);
|
||||
});
|
||||
};
|
||||
BaseFileSystem.prototype.appendFileSync = function (fname, data, encoding, flag, mode) {
|
||||
var fd = this.openSync(fname, flag, mode);
|
||||
try {
|
||||
if (typeof data === 'string') {
|
||||
data = Buffer.from(data, encoding);
|
||||
}
|
||||
fd.writeSync(data, 0, data.length, null);
|
||||
}
|
||||
finally {
|
||||
fd.closeSync();
|
||||
}
|
||||
};
|
||||
BaseFileSystem.prototype.chmod = function (p, isLchmod, mode, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.chmodSync = function (p, isLchmod, mode) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.chown = function (p, isLchown, uid, gid, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.chownSync = function (p, isLchown, uid, gid) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.utimes = function (p, atime, mtime, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.utimesSync = function (p, atime, mtime) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.link = function (srcpath, dstpath, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.linkSync = function (srcpath, dstpath) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.symlink = function (srcpath, dstpath, type, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.symlinkSync = function (srcpath, dstpath, type) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
BaseFileSystem.prototype.readlink = function (p, cb) {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP));
|
||||
};
|
||||
BaseFileSystem.prototype.readlinkSync = function (p) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
return BaseFileSystem;
|
||||
}());
|
||||
exports.BaseFileSystem = BaseFileSystem;
|
||||
/**
|
||||
* Implements the asynchronous API in terms of the synchronous API.
|
||||
* @class SynchronousFileSystem
|
||||
*/
|
||||
var SynchronousFileSystem = /** @class */ (function (_super) {
|
||||
__extends(SynchronousFileSystem, _super);
|
||||
function SynchronousFileSystem() {
|
||||
return _super !== null && _super.apply(this, arguments) || this;
|
||||
}
|
||||
SynchronousFileSystem.prototype.supportsSynch = function () {
|
||||
return true;
|
||||
};
|
||||
SynchronousFileSystem.prototype.rename = function (oldPath, newPath, cb) {
|
||||
try {
|
||||
this.renameSync(oldPath, newPath);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.stat = function (p, isLstat, cb) {
|
||||
try {
|
||||
cb(null, this.statSync(p, isLstat));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.open = function (p, flags, mode, cb) {
|
||||
try {
|
||||
cb(null, this.openSync(p, flags, mode));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.unlink = function (p, cb) {
|
||||
try {
|
||||
this.unlinkSync(p);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.rmdir = function (p, cb) {
|
||||
try {
|
||||
this.rmdirSync(p);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.mkdir = function (p, mode, cb) {
|
||||
try {
|
||||
this.mkdirSync(p, mode);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.readdir = function (p, cb) {
|
||||
try {
|
||||
cb(null, this.readdirSync(p));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.chmod = function (p, isLchmod, mode, cb) {
|
||||
try {
|
||||
this.chmodSync(p, isLchmod, mode);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.chown = function (p, isLchown, uid, gid, cb) {
|
||||
try {
|
||||
this.chownSync(p, isLchown, uid, gid);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.utimes = function (p, atime, mtime, cb) {
|
||||
try {
|
||||
this.utimesSync(p, atime, mtime);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.link = function (srcpath, dstpath, cb) {
|
||||
try {
|
||||
this.linkSync(srcpath, dstpath);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.symlink = function (srcpath, dstpath, type, cb) {
|
||||
try {
|
||||
this.symlinkSync(srcpath, dstpath, type);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
SynchronousFileSystem.prototype.readlink = function (p, cb) {
|
||||
try {
|
||||
cb(null, this.readlinkSync(p));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
return SynchronousFileSystem;
|
||||
}(BaseFileSystem));
|
||||
exports.SynchronousFileSystem = SynchronousFileSystem;
|
||||
//# sourceMappingURL=file_system.js.map
|
||||
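The pattern above is mechanical: each asynchronous method is the synchronous one wrapped in try/catch, with any exception routed to the callback. The same wrapper can be written generically; a sketch (the `callbackify` helper below is not part of BrowserFS, and the import paths are assumed):

```typescript
import { BFSCallback } from './core/file_system'; // assumed path
import { ApiError } from './core/api_error';      // assumed path

// Generic form of the wrapping used by SynchronousFileSystem: run a
// synchronous operation and deliver its result (or the ApiError) via callback.
function callbackify<T>(op: () => T, cb: BFSCallback<T>): void {
  try {
    cb(null, op());
  } catch (e) {
    cb(e as ApiError);
  }
}

// Usage, mirroring SynchronousFileSystem.prototype.readdir:
// callbackify(() => this.readdirSync(p), cb);
```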
19
sandpack-generated/static/browserfs11/node/core/file_watcher.d.ts
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
/// <reference types="node" />
|
||||
import * as _fs from 'fs';
|
||||
import Stats from './node_fs_stats';
|
||||
export declare class FileWatcher {
|
||||
triggerWatch(filename: string, event: 'change' | 'rename', newStats?: Stats): void;
|
||||
watch(filename: string, listener?: (event: string, filename: string) => any): _fs.FSWatcher;
|
||||
watch(filename: string, options: {
|
||||
recursive?: boolean;
|
||||
persistent?: boolean;
|
||||
}, listener?: (event: string, filename: string) => any): _fs.FSWatcher;
|
||||
watchFile(curr: Stats, filename: string, listener: (curr: Stats, prev: Stats) => void): void;
|
||||
watchFile(curr: Stats, filename: string, options: {
|
||||
persistent?: boolean;
|
||||
interval?: number;
|
||||
}, listener: (curr: Stats, prev: Stats) => void): void;
|
||||
unwatchFile(filename: string, listener: (curr: Stats, prev: Stats) => void): any;
|
||||
private watchEntries;
|
||||
private removeEntry;
|
||||
}
|
||||
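The watcher is driven manually: the fs layer calls `triggerWatch` when it mutates a path, and anything registered through `watch`/`watchFile` for that path (or a recursive parent) is notified. A usage sketch based on the declaration above, with an assumed import path:

```typescript
import { FileWatcher } from './core/file_watcher'; // assumed path

const watcher = new FileWatcher();

// Register a listener; { recursive: true } also matches paths under /home.
const w = watcher.watch('/home', { recursive: true, persistent: true }, (event, filename) => {
  console.log(event, filename); // e.g. "change /home/a.txt"
});

// A backend (or a test) reports a mutation:
watcher.triggerWatch('/home/a.txt', 'change');

w.close(); // stop receiving events
```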
@@ -0,0 +1,90 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FileWatcher = void 0;
|
||||
var EventEmitter = require('events');
|
||||
var FileWatcher = /** @class */ (function () {
|
||||
function FileWatcher() {
|
||||
this.watchEntries = [];
|
||||
}
|
||||
FileWatcher.prototype.triggerWatch = function (filename, event, newStats) {
|
||||
var _this = this;
|
||||
var validEntries = this.watchEntries.filter(function (entry) {
|
||||
if (entry.filename === filename) {
|
||||
return true;
|
||||
}
|
||||
if (entry.recursive && filename.startsWith(entry.filename)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
validEntries.forEach(function (entry) {
|
||||
if (entry.callback) {
|
||||
entry.callback(event, filename);
|
||||
}
|
||||
var newStatsArg = newStats || entry.curr;
|
||||
var oldStatsArg = entry.curr || newStats;
|
||||
if (newStatsArg && oldStatsArg && entry.fileCallback) {
|
||||
entry.fileCallback(newStatsArg, oldStatsArg);
|
||||
entry.curr = newStatsArg;
|
||||
}
|
||||
entry.watcher.emit(event);
|
||||
if (!entry.persistent) {
|
||||
_this.removeEntry(entry);
|
||||
}
|
||||
});
|
||||
};
|
||||
FileWatcher.prototype.watch = function (filename, arg2, listener) {
|
||||
var _this = this;
|
||||
if (listener === void 0) { listener = (function () { }); }
|
||||
var watcher = new EventEmitter();
|
||||
var watchEntry = {
|
||||
filename: filename,
|
||||
watcher: watcher,
|
||||
};
|
||||
watcher.close = function () {
|
||||
_this.removeEntry(watchEntry);
|
||||
};
|
||||
if (typeof arg2 === 'object') {
|
||||
watchEntry.recursive = arg2.recursive;
|
||||
watchEntry.persistent = arg2.persistent === undefined ? true : arg2.persistent;
|
||||
watchEntry.callback = listener;
|
||||
}
|
||||
else if (typeof arg2 === 'function') {
|
||||
watchEntry.callback = arg2;
|
||||
}
|
||||
this.watchEntries.push(watchEntry);
|
||||
return watchEntry.watcher;
|
||||
};
|
||||
FileWatcher.prototype.watchFile = function (curr, filename, arg2, listener) {
|
||||
var _this = this;
|
||||
if (listener === void 0) { listener = (function () { }); }
|
||||
var watcher = new EventEmitter();
|
||||
var watchEntry = {
|
||||
filename: filename,
|
||||
watcher: watcher,
|
||||
curr: curr,
|
||||
};
|
||||
watcher.close = function () {
|
||||
_this.removeEntry(watchEntry);
|
||||
};
|
||||
if (typeof arg2 === 'object') {
|
||||
watchEntry.recursive = arg2.recursive;
|
||||
watchEntry.persistent = arg2.persistent === undefined ? true : arg2.persistent;
|
||||
watchEntry.fileCallback = listener;
|
||||
}
|
||||
else if (typeof arg2 === 'function') {
|
||||
watchEntry.fileCallback = arg2;
|
||||
}
|
||||
this.watchEntries.push(watchEntry);
|
||||
return watchEntry.watcher;
|
||||
};
|
||||
FileWatcher.prototype.unwatchFile = function (filename, listener) {
|
||||
this.watchEntries = this.watchEntries.filter(function (entry) { return entry.filename !== filename && entry.fileCallback !== listener; });
|
||||
};
|
||||
FileWatcher.prototype.removeEntry = function (watchEntry) {
|
||||
this.watchEntries = this.watchEntries.filter(function (en) { return en !== watchEntry; });
|
||||
};
|
||||
return FileWatcher;
|
||||
}());
|
||||
exports.FileWatcher = FileWatcher;
|
||||
//# sourceMappingURL=file_watcher.js.map
|
||||
5
sandpack-generated/static/browserfs11/node/core/global.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
declare const toExport: any;
|
||||
export default toExport;
|
||||
@@ -0,0 +1,8 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var toExport = typeof (window) !== 'undefined' ? window : typeof (self) !== 'undefined' ? self : global;
|
||||
exports.default = toExport;
|
||||
//# sourceMappingURL=global.js.map
|
||||
6
sandpack-generated/static/browserfs11/node/core/levenshtein.d.ts
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
/**
|
||||
* Calculates levenshtein distance.
|
||||
* @param a
|
||||
* @param b
|
||||
*/
|
||||
export default function levenshtein(a: string, b: string): number;
|
||||
@@ -0,0 +1,92 @@
|
||||
"use strict";
|
||||
/*
|
||||
* Levenshtein distance, from the `js-levenshtein` NPM module.
|
||||
* Copied here to avoid complexity of adding another CommonJS module dependency.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
function _min(d0, d1, d2, bx, ay) {
|
||||
return d0 < d1 || d2 < d1
|
||||
? d0 > d2
|
||||
? d2 + 1
|
||||
: d0 + 1
|
||||
: bx === ay
|
||||
? d1
|
||||
: d1 + 1;
|
||||
}
|
||||
/**
|
||||
* Calculates levenshtein distance.
|
||||
* @param a
|
||||
* @param b
|
||||
*/
|
||||
function levenshtein(a, b) {
|
||||
if (a === b) {
|
||||
return 0;
|
||||
}
|
||||
if (a.length > b.length) {
|
||||
var tmp = a;
|
||||
a = b;
|
||||
b = tmp;
|
||||
}
|
||||
var la = a.length;
|
||||
var lb = b.length;
|
||||
while (la > 0 && (a.charCodeAt(la - 1) === b.charCodeAt(lb - 1))) {
|
||||
la--;
|
||||
lb--;
|
||||
}
|
||||
var offset = 0;
|
||||
while (offset < la && (a.charCodeAt(offset) === b.charCodeAt(offset))) {
|
||||
offset++;
|
||||
}
|
||||
la -= offset;
|
||||
lb -= offset;
|
||||
if (la === 0 || lb === 1) {
|
||||
return lb;
|
||||
}
|
||||
var vector = new Array(la << 1);
|
||||
for (var y = 0; y < la;) {
|
||||
vector[la + y] = a.charCodeAt(offset + y);
|
||||
vector[y] = ++y;
|
||||
}
|
||||
var x;
|
||||
var d0;
|
||||
var d1;
|
||||
var d2;
|
||||
var d3;
|
||||
for (x = 0; (x + 3) < lb;) {
|
||||
var bx0 = b.charCodeAt(offset + (d0 = x));
|
||||
var bx1 = b.charCodeAt(offset + (d1 = x + 1));
|
||||
var bx2 = b.charCodeAt(offset + (d2 = x + 2));
|
||||
var bx3 = b.charCodeAt(offset + (d3 = x + 3));
|
||||
var dd_1 = (x += 4);
|
||||
for (var y = 0; y < la;) {
|
||||
var ay = vector[la + y];
|
||||
var dy = vector[y];
|
||||
d0 = _min(dy, d0, d1, bx0, ay);
|
||||
d1 = _min(d0, d1, d2, bx1, ay);
|
||||
d2 = _min(d1, d2, d3, bx2, ay);
|
||||
dd_1 = _min(d2, d3, dd_1, bx3, ay);
|
||||
vector[y++] = dd_1;
|
||||
d3 = d2;
|
||||
d2 = d1;
|
||||
d1 = d0;
|
||||
d0 = dy;
|
||||
}
|
||||
}
|
||||
var dd = 0;
|
||||
for (; x < lb;) {
|
||||
var bx0 = b.charCodeAt(offset + (d0 = x));
|
||||
dd = ++x;
|
||||
for (var y = 0; y < la; y++) {
|
||||
var dy = vector[y];
|
||||
vector[y] = dd = dy < d0 || dd < d0
|
||||
? dy > dd ? dd + 1 : dy + 1
|
||||
: bx0 === vector[la + y]
|
||||
? d0
|
||||
: d0 + 1;
|
||||
d0 = dy;
|
||||
}
|
||||
}
|
||||
return dd;
|
||||
}
|
||||
exports.default = levenshtein;
|
||||
//# sourceMappingURL=levenshtein.js.map
|
||||
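The distance computed here is what `checkOptions` in core/util.js (later in this listing) uses to suggest near-miss option names; anything with distance below 5 is treated as "close". A small sketch, with an illustrative require path:

```javascript
var levenshtein = require('./levenshtein').default;

console.log(levenshtein('kitten', 'sitting'));               // 3 (classic example)
console.log(levenshtein('storeName', 'storeNmae'));          // 2 -> close enough to be suggested
console.log(levenshtein('sync', 'completelyDifferent') < 5); // false -> never suggested
```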
6
sandpack-generated/static/browserfs11/node/core/node_fs.d.ts
vendored
Normal file
6
sandpack-generated/static/browserfs11/node/core/node_fs.d.ts
vendored
Normal file
@@ -0,0 +1,6 @@
import { FSModule } from './FS';
/**
* @hidden
*/
declare const _fsMock: FSModule;
export default _fsMock;
44
sandpack-generated/static/browserfs11/node/core/node_fs.js
Normal file
44
sandpack-generated/static/browserfs11/node/core/node_fs.js
Normal file
@@ -0,0 +1,44 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var FS_1 = require("./FS");
|
||||
// Manually export the individual public functions of fs.
|
||||
// Required because some code will invoke functions off of the module.
|
||||
// e.g.:
|
||||
// let writeFile = fs.writeFile;
|
||||
// writeFile(...)
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var fs = new FS_1.default();
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var _fsMock = {};
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var fsProto = FS_1.default.prototype;
|
||||
Object.keys(fsProto).forEach(function (key) {
|
||||
if (typeof fs[key] === 'function') {
|
||||
_fsMock[key] = function () {
|
||||
return fs[key].apply(fs, arguments);
|
||||
};
|
||||
}
|
||||
else {
|
||||
_fsMock[key] = fs[key];
|
||||
}
|
||||
});
|
||||
_fsMock['changeFSModule'] = function (newFs) {
|
||||
fs = newFs;
|
||||
};
|
||||
_fsMock['getFSModule'] = function () {
|
||||
return fs;
|
||||
};
|
||||
_fsMock['FS'] = FS_1.default;
|
||||
_fsMock['Stats'] = FS_1.default.Stats;
|
||||
_fsMock['F_OK'] = 0;
|
||||
_fsMock['R_OK'] = 4;
|
||||
_fsMock['W_OK'] = 2;
|
||||
_fsMock['X_OK'] = 1;
|
||||
exports.default = _fsMock;
|
||||
//# sourceMappingURL=node_fs.js.map
|
||||
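A sketch of how the forwarding mock behaves. Each wrapper re-reads the module-level `fs` variable on every call, so a bare function reference captured from the mock keeps working even after `changeFSModule()` swaps the backing instance. It assumes a root file system has already been configured and initialized; `newFs` is hypothetical.

```javascript
var fsMock = require('./node_fs').default;

var writeFile = fsMock.writeFile;              // capture a bare function reference
writeFile('/hello.txt', 'hi', function (err) {
  if (err) { throw err; }
  // fsMock.changeFSModule(newFs);             // hypothetical swap of the backing FS
  // Subsequent calls through `writeFile` would now hit `newFs`.
});

console.log(fsMock.F_OK, fsMock.R_OK, fsMock.W_OK, fsMock.X_OK); // 0 4 2 1
```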
83
sandpack-generated/static/browserfs11/node/core/node_fs_stats.d.ts
vendored
Normal file
83
sandpack-generated/static/browserfs11/node/core/node_fs_stats.d.ts
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
import * as fs from 'fs';
|
||||
/**
|
||||
* Indicates the type of the given file. Applied to 'mode'.
|
||||
*/
|
||||
export declare enum FileType {
|
||||
FILE = 32768,
|
||||
DIRECTORY = 16384,
|
||||
SYMLINK = 40960
|
||||
}
|
||||
/**
|
||||
* Emulation of Node's `fs.Stats` object.
|
||||
*
|
||||
* Attribute descriptions are from `man 2 stat'
|
||||
* @see http://nodejs.org/api/fs.html#fs_class_fs_stats
|
||||
* @see http://man7.org/linux/man-pages/man2/stat.2.html
|
||||
*/
|
||||
export default class Stats implements fs.Stats {
|
||||
static fromBuffer(buffer: Buffer): Stats;
|
||||
/**
|
||||
* Clones the stats object.
|
||||
*/
|
||||
static clone(s: Stats): Stats;
|
||||
blocks: number;
|
||||
mode: number;
|
||||
/**
|
||||
* UNSUPPORTED ATTRIBUTES
|
||||
* I assume no one is going to need these details, although we could fake
|
||||
* appropriate values if need be.
|
||||
*/
|
||||
dev: number;
|
||||
ino: number;
|
||||
rdev: number;
|
||||
nlink: number;
|
||||
blksize: number;
|
||||
uid: number;
|
||||
gid: number;
|
||||
fileData: Buffer | null;
|
||||
atimeMs: number;
|
||||
mtimeMs: number;
|
||||
ctimeMs: number;
|
||||
birthtimeMs: number;
|
||||
size: number;
|
||||
get atime(): Date;
|
||||
get mtime(): Date;
|
||||
get ctime(): Date;
|
||||
get birthtime(): Date;
|
||||
/**
|
||||
* Provides information about a particular entry in the file system.
|
||||
* @param itemType Type of the item (FILE, DIRECTORY, SYMLINK, or SOCKET)
|
||||
* @param size Size of the item in bytes. For directories/symlinks,
|
||||
* this is normally the size of the struct that represents the item.
|
||||
* @param mode Unix-style file mode (e.g. 0o644)
|
||||
* @param atimeMs time of last access, in milliseconds since epoch
|
||||
* @param mtimeMs time of last modification, in milliseconds since epoch
|
||||
* @param ctimeMs time of last time file status was changed, in milliseconds since epoch
|
||||
* @param birthtimeMs time of file creation, in milliseconds since epoch
|
||||
*/
|
||||
constructor(itemType: FileType, size: number, mode?: number, atimeMs?: number, mtimeMs?: number, ctimeMs?: number, birthtimeMs?: number);
|
||||
toBuffer(): Buffer;
|
||||
/**
|
||||
* @return [Boolean] True if this item is a file.
|
||||
*/
|
||||
isFile(): boolean;
|
||||
/**
|
||||
* @return [Boolean] True if this item is a directory.
|
||||
*/
|
||||
isDirectory(): boolean;
|
||||
/**
|
||||
* @return [Boolean] True if this item is a symbolic link (only valid through lstat)
|
||||
*/
|
||||
isSymbolicLink(): boolean;
|
||||
/**
|
||||
* Change the mode of the file. We use this helper function to prevent messing
|
||||
* up the type of the file, which is encoded in mode.
|
||||
*/
|
||||
chmod(mode: number): void;
|
||||
isSocket(): boolean;
|
||||
isBlockDevice(): boolean;
|
||||
isCharacterDevice(): boolean;
|
||||
isFIFO(): boolean;
|
||||
}
|
||||
192
sandpack-generated/static/browserfs11/node/core/node_fs_stats.js
Normal file
192
sandpack-generated/static/browserfs11/node/core/node_fs_stats.js
Normal file
@@ -0,0 +1,192 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FileType = void 0;
|
||||
/**
|
||||
* Indicates the type of the given file. Applied to 'mode'.
|
||||
*/
|
||||
var FileType;
|
||||
(function (FileType) {
|
||||
FileType[FileType["FILE"] = 32768] = "FILE";
|
||||
FileType[FileType["DIRECTORY"] = 16384] = "DIRECTORY";
|
||||
FileType[FileType["SYMLINK"] = 40960] = "SYMLINK";
|
||||
})(FileType || (exports.FileType = FileType = {}));
|
||||
/**
|
||||
* Emulation of Node's `fs.Stats` object.
|
||||
*
|
||||
* Attribute descriptions are from `man 2 stat'
|
||||
* @see http://nodejs.org/api/fs.html#fs_class_fs_stats
|
||||
* @see http://man7.org/linux/man-pages/man2/stat.2.html
|
||||
*/
|
||||
var Stats = /** @class */ (function () {
|
||||
/**
|
||||
* Provides information about a particular entry in the file system.
|
||||
* @param itemType Type of the item (FILE, DIRECTORY, SYMLINK, or SOCKET)
|
||||
* @param size Size of the item in bytes. For directories/symlinks,
|
||||
* this is normally the size of the struct that represents the item.
|
||||
* @param mode Unix-style file mode (e.g. 0o644)
|
||||
* @param atimeMs time of last access, in milliseconds since epoch
|
||||
* @param mtimeMs time of last modification, in milliseconds since epoch
|
||||
* @param ctimeMs time of last time file status was changed, in milliseconds since epoch
|
||||
* @param birthtimeMs time of file creation, in milliseconds since epoch
|
||||
*/
|
||||
function Stats(itemType, size, mode, atimeMs, mtimeMs, ctimeMs, birthtimeMs) {
|
||||
/**
|
||||
* UNSUPPORTED ATTRIBUTES
|
||||
* I assume no one is going to need these details, although we could fake
|
||||
* appropriate values if need be.
|
||||
*/
|
||||
// ID of device containing file
|
||||
this.dev = 0;
|
||||
// inode number
|
||||
this.ino = 0;
|
||||
// device ID (if special file)
|
||||
this.rdev = 0;
|
||||
// number of hard links
|
||||
this.nlink = 1;
|
||||
// blocksize for file system I/O
|
||||
this.blksize = 4096;
|
||||
// @todo Maybe support these? atm, it's a one-user filesystem.
|
||||
// user ID of owner
|
||||
this.uid = 0;
|
||||
// group ID of owner
|
||||
this.gid = 0;
|
||||
// XXX: Some file systems stash data on stats objects.
|
||||
this.fileData = null;
|
||||
this.size = size;
|
||||
var currentTime = 0;
|
||||
if (typeof (atimeMs) !== 'number') {
|
||||
currentTime = Date.now();
|
||||
atimeMs = currentTime;
|
||||
}
|
||||
if (typeof (mtimeMs) !== 'number') {
|
||||
if (!currentTime) {
|
||||
currentTime = Date.now();
|
||||
}
|
||||
mtimeMs = currentTime;
|
||||
}
|
||||
if (typeof (ctimeMs) !== 'number') {
|
||||
if (!currentTime) {
|
||||
currentTime = Date.now();
|
||||
}
|
||||
ctimeMs = currentTime;
|
||||
}
|
||||
if (typeof (birthtimeMs) !== 'number') {
|
||||
if (!currentTime) {
|
||||
currentTime = Date.now();
|
||||
}
|
||||
birthtimeMs = currentTime;
|
||||
}
|
||||
this.atimeMs = atimeMs;
|
||||
this.ctimeMs = ctimeMs;
|
||||
this.mtimeMs = mtimeMs;
|
||||
this.birthtimeMs = birthtimeMs;
|
||||
if (!mode) {
|
||||
switch (itemType) {
|
||||
case FileType.FILE:
|
||||
this.mode = 0x1a4;
|
||||
break;
|
||||
case FileType.DIRECTORY:
|
||||
default:
|
||||
this.mode = 0x1ff;
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.mode = mode;
|
||||
}
|
||||
// number of 512B blocks allocated
|
||||
this.blocks = Math.ceil(size / 512);
|
||||
// Check if mode also includes top-most bits, which indicate the file's
|
||||
// type.
|
||||
if (this.mode < 0x1000) {
|
||||
this.mode |= itemType;
|
||||
}
|
||||
}
|
||||
Stats.fromBuffer = function (buffer) {
|
||||
var size = buffer.readUInt32LE(0), mode = buffer.readUInt32LE(4), atime = buffer.readDoubleLE(8), mtime = buffer.readDoubleLE(16), ctime = buffer.readDoubleLE(24);
|
||||
return new Stats(mode & 0xF000, size, mode & 0xFFF, atime, mtime, ctime);
|
||||
};
|
||||
/**
|
||||
* Clones the stats object.
|
||||
*/
|
||||
Stats.clone = function (s) {
|
||||
return new Stats(s.mode & 0xF000, s.size, s.mode & 0xFFF, s.atimeMs, s.mtimeMs, s.ctimeMs, s.birthtimeMs);
|
||||
};
|
||||
Object.defineProperty(Stats.prototype, "atime", {
|
||||
get: function () {
|
||||
return new Date(this.atimeMs);
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
Object.defineProperty(Stats.prototype, "mtime", {
|
||||
get: function () {
|
||||
return new Date(this.mtimeMs);
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
Object.defineProperty(Stats.prototype, "ctime", {
|
||||
get: function () {
|
||||
return new Date(this.ctimeMs);
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
Object.defineProperty(Stats.prototype, "birthtime", {
|
||||
get: function () {
|
||||
return new Date(this.birthtimeMs);
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
Stats.prototype.toBuffer = function () {
|
||||
var buffer = Buffer.alloc(32);
|
||||
buffer.writeUInt32LE(this.size, 0);
|
||||
buffer.writeUInt32LE(this.mode, 4);
|
||||
buffer.writeDoubleLE(this.atime.getTime(), 8);
|
||||
buffer.writeDoubleLE(this.mtime.getTime(), 16);
|
||||
buffer.writeDoubleLE(this.ctime.getTime(), 24);
|
||||
return buffer;
|
||||
};
|
||||
/**
|
||||
* @return [Boolean] True if this item is a file.
|
||||
*/
|
||||
Stats.prototype.isFile = function () {
|
||||
return (this.mode & 0xF000) === FileType.FILE;
|
||||
};
|
||||
/**
|
||||
* @return [Boolean] True if this item is a directory.
|
||||
*/
|
||||
Stats.prototype.isDirectory = function () {
|
||||
return (this.mode & 0xF000) === FileType.DIRECTORY;
|
||||
};
|
||||
/**
|
||||
* @return [Boolean] True if this item is a symbolic link (only valid through lstat)
|
||||
*/
|
||||
Stats.prototype.isSymbolicLink = function () {
|
||||
return (this.mode & 0xF000) === FileType.SYMLINK;
|
||||
};
|
||||
/**
|
||||
* Change the mode of the file. We use this helper function to prevent messing
|
||||
* up the type of the file, which is encoded in mode.
|
||||
*/
|
||||
Stats.prototype.chmod = function (mode) {
|
||||
this.mode = (this.mode & 0xF000) | mode;
|
||||
};
|
||||
// We don't support the following types of files.
|
||||
Stats.prototype.isSocket = function () {
|
||||
return false;
|
||||
};
|
||||
Stats.prototype.isBlockDevice = function () {
|
||||
return false;
|
||||
};
|
||||
Stats.prototype.isCharacterDevice = function () {
|
||||
return false;
|
||||
};
|
||||
Stats.prototype.isFIFO = function () {
|
||||
return false;
|
||||
};
|
||||
return Stats;
|
||||
}());
|
||||
exports.default = Stats;
|
||||
//# sourceMappingURL=node_fs_stats.js.map
|
||||
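A short sketch of the mode encoding and the 32-byte serialization above: the FileType bits live in the top nibble of `mode`, and `toBuffer()`/`fromBuffer()` round-trip size, mode and the access/modify/change times.

```javascript
var stats_1 = require('./node_fs_stats');
var Stats = stats_1.default;
var FileType = stats_1.FileType;

var s = new Stats(FileType.FILE, 1024, 0x1A4 /* 0o644 */);
console.log(s.isFile());                    // true  -> (mode & 0xF000) === FileType.FILE
console.log((s.mode & 0xFFF).toString(8));  // '644'
console.log(s.blocks);                      // 2     -> Math.ceil(1024 / 512)

var copy = Stats.fromBuffer(s.toBuffer());
console.log(copy.isFile(), copy.size);      // true 1024
```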
83
sandpack-generated/static/browserfs11/node/core/util.d.ts
vendored
Normal file
83
sandpack-generated/static/browserfs11/node/core/util.d.ts
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/// <reference types="node" />
|
||||
/**
|
||||
* Grab bag of utility functions used across the code.
|
||||
*/
|
||||
import { FileSystem, BFSOneArgCallback, FileSystemConstructor } from './file_system';
|
||||
export declare function deprecationMessage(print: boolean, fsName: string, opts: any): void;
|
||||
/**
|
||||
* Checks for any IE version, including IE11 which removed MSIE from the
|
||||
* userAgent string.
|
||||
* @hidden
|
||||
*/
|
||||
export declare const isIE: boolean;
|
||||
/**
|
||||
* Check if we're in a web worker.
|
||||
* @hidden
|
||||
*/
|
||||
export declare const isWebWorker: boolean;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface Arrayish<T> {
|
||||
[idx: number]: T;
|
||||
length: number;
|
||||
}
|
||||
/**
|
||||
* Throws an exception. Called on code paths that should be impossible.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function fail(): void;
|
||||
/**
|
||||
* Synchronous recursive makedir.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function mkdirpSync(p: string, mode: number, fs: FileSystem): void;
|
||||
/**
|
||||
* Converts a buffer into an array buffer. Attempts to do so in a
|
||||
* zero-copy manner, e.g. the array references the same memory.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function buffer2ArrayBuffer(buff: Buffer): ArrayBuffer | SharedArrayBuffer;
|
||||
/**
|
||||
* Converts a buffer into a Uint8Array. Attempts to do so in a
|
||||
* zero-copy manner, e.g. the array references the same memory.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function buffer2Uint8array(buff: Buffer): Uint8Array;
|
||||
/**
|
||||
* Converts the given arrayish object into a Buffer. Attempts to
|
||||
* be zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function arrayish2Buffer(arr: Arrayish<number>): Buffer;
|
||||
/**
|
||||
* Converts the given Uint8Array into a Buffer. Attempts to be zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function uint8Array2Buffer(u8: Uint8Array): Buffer;
|
||||
/**
|
||||
* Converts the given array buffer into a Buffer. Attempts to be
|
||||
* zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function arrayBuffer2Buffer(ab: ArrayBuffer | SharedArrayBuffer): Buffer;
|
||||
/**
|
||||
* Copies a slice of the given buffer
|
||||
* @hidden
|
||||
*/
|
||||
export declare function copyingSlice(buff: Buffer, start?: number, end?: number): Buffer;
|
||||
/**
|
||||
* Returns an empty buffer.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function emptyBuffer(): Buffer;
|
||||
/**
|
||||
* Option validator for a Buffer file system option.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function bufferValidator(v: object, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Checks that the given options object is valid for the file system options.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function checkOptions(fsType: FileSystemConstructor, opts: any, cb: BFSOneArgCallback): void;
|
||||
257
sandpack-generated/static/browserfs11/node/core/util.js
Normal file
257
sandpack-generated/static/browserfs11/node/core/util.js
Normal file
@@ -0,0 +1,257 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.checkOptions = exports.bufferValidator = exports.emptyBuffer = exports.copyingSlice = exports.arrayBuffer2Buffer = exports.uint8Array2Buffer = exports.arrayish2Buffer = exports.buffer2Uint8array = exports.buffer2ArrayBuffer = exports.mkdirpSync = exports.fail = exports.isWebWorker = exports.isIE = exports.deprecationMessage = void 0;
|
||||
var api_error_1 = require("./api_error");
|
||||
var levenshtein_1 = require("./levenshtein");
|
||||
var path = require("path");
|
||||
function deprecationMessage(print, fsName, opts) {
|
||||
if (print) {
|
||||
// tslint:disable-next-line:no-console
|
||||
console.warn("[".concat(fsName, "] Direct file system constructor usage is deprecated for this file system, and will be removed in the next major version. Please use the '").concat(fsName, ".Create(").concat(JSON.stringify(opts), ", callback)' method instead. See https://github.com/jvilk/BrowserFS/issues/176 for more details."));
|
||||
// tslint:enable-next-line:no-console
|
||||
}
|
||||
}
|
||||
exports.deprecationMessage = deprecationMessage;
|
||||
/**
|
||||
* Checks for any IE version, including IE11 which removed MSIE from the
|
||||
* userAgent string.
|
||||
* @hidden
|
||||
*/
|
||||
exports.isIE = typeof navigator !== "undefined" && Boolean(/(msie) ([\w.]+)/.exec(navigator.userAgent.toLowerCase()) || navigator.userAgent.indexOf('Trident') !== -1);
|
||||
/**
|
||||
* Check if we're in a web worker.
|
||||
* @hidden
|
||||
*/
|
||||
exports.isWebWorker = typeof window === "undefined";
|
||||
/**
|
||||
* Throws an exception. Called on code paths that should be impossible.
|
||||
* @hidden
|
||||
*/
|
||||
function fail() {
|
||||
throw new Error("BFS has reached an impossible code path; please file a bug.");
|
||||
}
|
||||
exports.fail = fail;
|
||||
/**
|
||||
* Synchronous recursive makedir.
|
||||
* @hidden
|
||||
*/
|
||||
function mkdirpSync(p, mode, fs) {
|
||||
if (!fs.existsSync(p)) {
|
||||
mkdirpSync(path.dirname(p), mode, fs);
|
||||
fs.mkdirSync(p, mode);
|
||||
}
|
||||
}
|
||||
exports.mkdirpSync = mkdirpSync;
|
||||
/**
|
||||
* Converts a buffer into an array buffer. Attempts to do so in a
|
||||
* zero-copy manner, e.g. the array references the same memory.
|
||||
* @hidden
|
||||
*/
|
||||
function buffer2ArrayBuffer(buff) {
|
||||
var u8 = buffer2Uint8array(buff), u8offset = u8.byteOffset, u8Len = u8.byteLength;
|
||||
if (u8offset === 0 && u8Len === u8.buffer.byteLength) {
|
||||
return u8.buffer;
|
||||
}
|
||||
else {
|
||||
return u8.buffer.slice(u8offset, u8offset + u8Len);
|
||||
}
|
||||
}
|
||||
exports.buffer2ArrayBuffer = buffer2ArrayBuffer;
|
||||
/**
|
||||
* Converts a buffer into a Uint8Array. Attempts to do so in a
|
||||
* zero-copy manner, e.g. the array references the same memory.
|
||||
* @hidden
|
||||
*/
|
||||
function buffer2Uint8array(buff) {
|
||||
if (buff instanceof Uint8Array) {
|
||||
// BFS & Node v4.0 buffers *are* Uint8Arrays.
|
||||
return buff;
|
||||
}
|
||||
else {
|
||||
// Uint8Arrays can be constructed from arrayish numbers.
|
||||
// At this point, we assume this isn't a BFS array.
|
||||
return new Uint8Array(buff);
|
||||
}
|
||||
}
|
||||
exports.buffer2Uint8array = buffer2Uint8array;
|
||||
/**
|
||||
* Converts the given arrayish object into a Buffer. Attempts to
|
||||
* be zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
function arrayish2Buffer(arr) {
|
||||
if (arr instanceof Buffer) {
|
||||
return arr;
|
||||
}
|
||||
else if (arr instanceof Uint8Array) {
|
||||
return uint8Array2Buffer(arr);
|
||||
}
|
||||
else {
|
||||
return Buffer.from(arr);
|
||||
}
|
||||
}
|
||||
exports.arrayish2Buffer = arrayish2Buffer;
|
||||
/**
|
||||
* Converts the given Uint8Array into a Buffer. Attempts to be zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
function uint8Array2Buffer(u8) {
|
||||
if (u8 instanceof Buffer) {
|
||||
return u8;
|
||||
}
|
||||
else if (u8.byteOffset === 0 && u8.byteLength === u8.buffer.byteLength) {
|
||||
return arrayBuffer2Buffer(u8.buffer);
|
||||
}
|
||||
else {
|
||||
return Buffer.from(u8.buffer, u8.byteOffset, u8.byteLength);
|
||||
}
|
||||
}
|
||||
exports.uint8Array2Buffer = uint8Array2Buffer;
|
||||
/**
|
||||
* Converts the given array buffer into a Buffer. Attempts to be
|
||||
* zero-copy.
|
||||
* @hidden
|
||||
*/
|
||||
function arrayBuffer2Buffer(ab) {
|
||||
return Buffer.from(ab);
|
||||
}
|
||||
exports.arrayBuffer2Buffer = arrayBuffer2Buffer;
|
||||
/**
|
||||
* Copies a slice of the given buffer
|
||||
* @hidden
|
||||
*/
|
||||
function copyingSlice(buff, start, end) {
|
||||
if (start === void 0) { start = 0; }
|
||||
if (end === void 0) { end = buff.length; }
|
||||
if (start < 0 || end < 0 || end > buff.length || start > end) {
|
||||
throw new TypeError("Invalid slice bounds on buffer of length ".concat(buff.length, ": [").concat(start, ", ").concat(end, "]"));
|
||||
}
|
||||
if (buff.length === 0) {
|
||||
// Avoid s0 corner case in ArrayBuffer case.
|
||||
return emptyBuffer();
|
||||
}
|
||||
else {
|
||||
var u8 = buffer2Uint8array(buff), s0 = buff[0], newS0 = (s0 + 1) % 0xFF;
|
||||
buff[0] = newS0;
|
||||
if (u8[0] === newS0) {
|
||||
// Same memory. Revert & copy.
|
||||
u8[0] = s0;
|
||||
return uint8Array2Buffer(u8.slice(start, end));
|
||||
}
|
||||
else {
|
||||
// Revert.
|
||||
buff[0] = s0;
|
||||
return uint8Array2Buffer(u8.subarray(start, end));
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.copyingSlice = copyingSlice;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var emptyBuff = null;
|
||||
/**
|
||||
* Returns an empty buffer.
|
||||
* @hidden
|
||||
*/
|
||||
function emptyBuffer() {
|
||||
if (emptyBuff) {
|
||||
return emptyBuff;
|
||||
}
|
||||
return emptyBuff = Buffer.alloc(0);
|
||||
}
|
||||
exports.emptyBuffer = emptyBuffer;
|
||||
/**
|
||||
* Option validator for a Buffer file system option.
|
||||
* @hidden
|
||||
*/
|
||||
function bufferValidator(v, cb) {
|
||||
if (Buffer.isBuffer(v)) {
|
||||
cb();
|
||||
}
|
||||
else {
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "option must be a Buffer."));
|
||||
}
|
||||
}
|
||||
exports.bufferValidator = bufferValidator;
|
||||
/**
|
||||
* Checks that the given options object is valid for the file system options.
|
||||
* @hidden
|
||||
*/
|
||||
function checkOptions(fsType, opts, cb) {
|
||||
var optsInfo = fsType.Options;
|
||||
var fsName = fsType.Name;
|
||||
var pendingValidators = 0;
|
||||
var callbackCalled = false;
|
||||
var loopEnded = false;
|
||||
function validatorCallback(e) {
|
||||
if (!callbackCalled) {
|
||||
if (e) {
|
||||
callbackCalled = true;
|
||||
cb(e);
|
||||
}
|
||||
pendingValidators--;
|
||||
if (pendingValidators === 0 && loopEnded) {
|
||||
cb();
|
||||
}
|
||||
}
|
||||
}
|
||||
var _loop_1 = function (optName) {
|
||||
if (optsInfo.hasOwnProperty(optName)) {
|
||||
var opt = optsInfo[optName];
|
||||
var providedValue = opts[optName];
|
||||
if (providedValue === undefined || providedValue === null) {
|
||||
if (!opt.optional) {
|
||||
// Required option, not provided.
|
||||
// Any incorrect options provided? Which ones are close to the provided one?
|
||||
// (edit distance 5 === close)
|
||||
var incorrectOptions = Object.keys(opts).filter(function (o) { return !(o in optsInfo); }).map(function (a) {
|
||||
return { str: a, distance: (0, levenshtein_1.default)(optName, a) };
|
||||
}).filter(function (o) { return o.distance < 5; }).sort(function (a, b) { return a.distance - b.distance; });
|
||||
// Validators may be synchronous.
|
||||
if (callbackCalled) {
|
||||
return { value: void 0 };
|
||||
}
|
||||
callbackCalled = true;
|
||||
return { value: cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "[".concat(fsName, "] Required option '").concat(optName, "' not provided.").concat(incorrectOptions.length > 0 ? " You provided unrecognized option '".concat(incorrectOptions[0].str, "'; perhaps you meant to type '").concat(optName, "'.") : '', "\nOption description: ").concat(opt.description))) };
|
||||
}
|
||||
// Else: Optional option, not provided. That is OK.
|
||||
}
|
||||
else {
|
||||
// Option provided! Check type.
|
||||
var typeMatches = false;
|
||||
if (Array.isArray(opt.type)) {
|
||||
typeMatches = opt.type.indexOf(typeof (providedValue)) !== -1;
|
||||
}
|
||||
else {
|
||||
typeMatches = typeof (providedValue) === opt.type;
|
||||
}
|
||||
if (!typeMatches) {
|
||||
// Validators may be synchronous.
|
||||
if (callbackCalled) {
|
||||
return { value: void 0 };
|
||||
}
|
||||
callbackCalled = true;
|
||||
return { value: cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "[".concat(fsName, "] Value provided for option ").concat(optName, " is not the proper type. Expected ").concat(Array.isArray(opt.type) ? "one of {".concat(opt.type.join(", "), "}") : opt.type, ", but received ").concat(typeof (providedValue), "\nOption description: ").concat(opt.description))) };
|
||||
}
|
||||
else if (opt.validator) {
|
||||
pendingValidators++;
|
||||
opt.validator(providedValue, validatorCallback);
|
||||
}
|
||||
// Otherwise: All good!
|
||||
}
|
||||
}
|
||||
};
|
||||
// Check for required options.
|
||||
for (var optName in optsInfo) {
|
||||
var state_1 = _loop_1(optName);
|
||||
if (typeof state_1 === "object")
|
||||
return state_1.value;
|
||||
}
|
||||
loopEnded = true;
|
||||
if (pendingValidators === 0 && !callbackCalled) {
|
||||
cb();
|
||||
}
|
||||
}
|
||||
exports.checkOptions = checkOptions;
|
||||
//# sourceMappingURL=util.js.map
|
||||
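A hedged sketch of `checkOptions()` driving the validation flow above. `FakeFS` is a hypothetical constructor-like object; real backends expose the same static `Name` and `Options` shape.

```javascript
var util = require('./util');

var FakeFS = {
  Name: 'FakeFS',
  Options: {
    storeName: { type: 'string', optional: false, description: 'Name of the backing store.' },
    cacheSize: { type: 'number', optional: true, description: 'Entries to cache.' }
  }
};

// Misspelled required option -> EINVAL, with a "perhaps you meant" suggestion
// produced by the levenshtein-based near-miss search.
util.checkOptions(FakeFS, { storeNmae: 'files' }, function (e) {
  console.log(e ? e.message : 'options ok');
});

// Valid options -> callback invoked with no error.
util.checkOptions(FakeFS, { storeName: 'files', cacheSize: 100 }, function (e) {
  console.log(e ? e.message : 'options ok'); // 'options ok'
});
```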
1
sandpack-generated/static/browserfs11/node/generic/dropbox_bridge_actual.d.ts
vendored
Normal file
1
sandpack-generated/static/browserfs11/node/generic/dropbox_bridge_actual.d.ts
vendored
Normal file
@@ -0,0 +1 @@
export declare const Dropbox: any;
@@ -0,0 +1,7 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Dropbox = void 0;
var global_1 = require("../core/global");
// If Dropbox isn't on the webpage, then set this to null.
exports.Dropbox = global_1.default.Dropbox ? global_1.default.Dropbox.Dropbox : undefined;
//# sourceMappingURL=dropbox_bridge_actual.js.map
125
sandpack-generated/static/browserfs11/node/generic/emscripten_fs.d.ts
vendored
Normal file
125
sandpack-generated/static/browserfs11/node/generic/emscripten_fs.d.ts
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
/**
|
||||
* Defines an Emscripten file system object for use in the Emscripten virtual
|
||||
* filesystem. Allows you to use synchronous BrowserFS file systems from within
|
||||
* Emscripten.
|
||||
*
|
||||
* You can construct a BFSEmscriptenFS, mount it using its mount command,
|
||||
* and then mount it into Emscripten.
|
||||
*
|
||||
* Adapted from Emscripten's NodeFS:
|
||||
* https://raw.github.com/kripken/emscripten/master/src/library_nodefs.js
|
||||
*/
|
||||
import FS from '../core/FS';
|
||||
export interface Stats {
|
||||
dev: number;
|
||||
ino: number;
|
||||
mode: number;
|
||||
nlink: number;
|
||||
uid: number;
|
||||
gid: number;
|
||||
rdev: number;
|
||||
size: number;
|
||||
blksize: number;
|
||||
blocks: number;
|
||||
atime: Date;
|
||||
mtime: Date;
|
||||
ctime: Date;
|
||||
timestamp?: number;
|
||||
}
|
||||
export interface EmscriptenFSNode {
|
||||
name: string;
|
||||
mode: number;
|
||||
parent: EmscriptenFSNode;
|
||||
mount: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
};
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
node_ops: EmscriptenNodeOps;
|
||||
}
|
||||
export interface EmscriptenStream {
|
||||
node: EmscriptenFSNode;
|
||||
nfd: any;
|
||||
flags: string;
|
||||
position: number;
|
||||
}
|
||||
export interface EmscriptenNodeOps {
|
||||
getattr(node: EmscriptenFSNode): Stats;
|
||||
setattr(node: EmscriptenFSNode, attr: Stats): void;
|
||||
lookup(parent: EmscriptenFSNode, name: string): EmscriptenFSNode;
|
||||
mknod(parent: EmscriptenFSNode, name: string, mode: number, dev: any): EmscriptenFSNode;
|
||||
rename(oldNode: EmscriptenFSNode, newDir: EmscriptenFSNode, newName: string): void;
|
||||
unlink(parent: EmscriptenFSNode, name: string): void;
|
||||
rmdir(parent: EmscriptenFSNode, name: string): void;
|
||||
readdir(node: EmscriptenFSNode): string[];
|
||||
symlink(parent: EmscriptenFSNode, newName: string, oldPath: string): void;
|
||||
readlink(node: EmscriptenFSNode): string;
|
||||
}
|
||||
export interface EmscriptenStreamOps {
|
||||
open(stream: EmscriptenStream): void;
|
||||
close(stream: EmscriptenStream): void;
|
||||
read(stream: EmscriptenStream, buffer: Uint8Array, offset: number, length: number, position: number): number;
|
||||
write(stream: EmscriptenStream, buffer: Uint8Array, offset: number, length: number, position: number): number;
|
||||
llseek(stream: EmscriptenStream, offset: number, whence: number): number;
|
||||
}
|
||||
export interface EmscriptenFS {
|
||||
node_ops: EmscriptenNodeOps;
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
mount(mount: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
}): EmscriptenFSNode;
|
||||
createNode(parent: EmscriptenFSNode, name: string, mode: number, dev?: any): EmscriptenFSNode;
|
||||
getMode(path: string): number;
|
||||
realPath(node: EmscriptenFSNode): string;
|
||||
}
|
||||
export default class BFSEmscriptenFS implements EmscriptenFS {
|
||||
flagsToPermissionStringMap: {
|
||||
0: string;
|
||||
1: string;
|
||||
2: string;
|
||||
64: string;
|
||||
65: string;
|
||||
66: string;
|
||||
129: string;
|
||||
193: string;
|
||||
514: string;
|
||||
577: string;
|
||||
578: string;
|
||||
705: string;
|
||||
706: string;
|
||||
1024: string;
|
||||
1025: string;
|
||||
1026: string;
|
||||
1089: string;
|
||||
1090: string;
|
||||
1153: string;
|
||||
1154: string;
|
||||
1217: string;
|
||||
1218: string;
|
||||
4096: string;
|
||||
4098: string;
|
||||
};
|
||||
node_ops: EmscriptenNodeOps;
|
||||
stream_ops: EmscriptenStreamOps;
|
||||
private FS;
|
||||
private PATH;
|
||||
private ERRNO_CODES;
|
||||
private nodefs;
|
||||
constructor(_FS?: any, _PATH?: any, _ERRNO_CODES?: any, nodefs?: FS);
|
||||
mount(m: {
|
||||
opts: {
|
||||
root: string;
|
||||
};
|
||||
}): EmscriptenFSNode;
|
||||
createNode(parent: EmscriptenFSNode | null, name: string, mode: number, dev?: any): EmscriptenFSNode;
|
||||
getMode(path: string): number;
|
||||
realPath(node: EmscriptenFSNode): string;
|
||||
flagsToPermissionString(flags: string | number): string;
|
||||
getNodeFS(): FS;
|
||||
getFS(): any;
|
||||
getPATH(): any;
|
||||
getERRNO_CODES(): any;
|
||||
}
|
||||
@@ -0,0 +1,365 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
var util_1 = require("../core/util");
|
||||
var BFSEmscriptenStreamOps = /** @class */ (function () {
|
||||
function BFSEmscriptenStreamOps(fs) {
|
||||
this.fs = fs;
|
||||
this.nodefs = fs.getNodeFS();
|
||||
this.FS = fs.getFS();
|
||||
this.PATH = fs.getPATH();
|
||||
this.ERRNO_CODES = fs.getERRNO_CODES();
|
||||
}
|
||||
BFSEmscriptenStreamOps.prototype.open = function (stream) {
|
||||
var path = this.fs.realPath(stream.node);
|
||||
var FS = this.FS;
|
||||
try {
|
||||
if (FS.isFile(stream.node.mode)) {
|
||||
stream.nfd = this.nodefs.openSync(path, this.fs.flagsToPermissionString(stream.flags));
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.close = function (stream) {
|
||||
var FS = this.FS;
|
||||
try {
|
||||
if (FS.isFile(stream.node.mode) && stream.nfd) {
|
||||
this.nodefs.closeSync(stream.nfd);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.read = function (stream, buffer, offset, length, position) {
|
||||
// Avoid copying overhead by reading directly into buffer.
|
||||
try {
|
||||
return this.nodefs.readSync(stream.nfd, (0, util_1.uint8Array2Buffer)(buffer), offset, length, position);
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.write = function (stream, buffer, offset, length, position) {
|
||||
// Avoid copying overhead.
|
||||
try {
|
||||
return this.nodefs.writeSync(stream.nfd, (0, util_1.uint8Array2Buffer)(buffer), offset, length, position);
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenStreamOps.prototype.llseek = function (stream, offset, whence) {
|
||||
var position = offset;
|
||||
if (whence === 1) { // SEEK_CUR.
|
||||
position += stream.position;
|
||||
}
|
||||
else if (whence === 2) { // SEEK_END.
|
||||
if (this.FS.isFile(stream.node.mode)) {
|
||||
try {
|
||||
var stat = this.nodefs.fstatSync(stream.nfd);
|
||||
position += stat.size;
|
||||
}
|
||||
catch (e) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (position < 0) {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES.EINVAL);
|
||||
}
|
||||
stream.position = position;
|
||||
return position;
|
||||
};
|
||||
return BFSEmscriptenStreamOps;
|
||||
}());
|
||||
var BFSEmscriptenNodeOps = /** @class */ (function () {
|
||||
function BFSEmscriptenNodeOps(fs) {
|
||||
this.fs = fs;
|
||||
this.nodefs = fs.getNodeFS();
|
||||
this.FS = fs.getFS();
|
||||
this.PATH = fs.getPATH();
|
||||
this.ERRNO_CODES = fs.getERRNO_CODES();
|
||||
}
|
||||
BFSEmscriptenNodeOps.prototype.getattr = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
var stat;
|
||||
try {
|
||||
stat = this.nodefs.lstatSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return {
|
||||
dev: stat.dev,
|
||||
ino: stat.ino,
|
||||
mode: stat.mode,
|
||||
nlink: stat.nlink,
|
||||
uid: stat.uid,
|
||||
gid: stat.gid,
|
||||
rdev: stat.rdev,
|
||||
size: stat.size,
|
||||
atime: stat.atime,
|
||||
mtime: stat.mtime,
|
||||
ctime: stat.ctime,
|
||||
blksize: stat.blksize,
|
||||
blocks: stat.blocks
|
||||
};
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.setattr = function (node, attr) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
if (attr.mode !== undefined) {
|
||||
this.nodefs.chmodSync(path, attr.mode);
|
||||
// update the common node structure mode as well
|
||||
node.mode = attr.mode;
|
||||
}
|
||||
if (attr.timestamp !== undefined) {
|
||||
var date = new Date(attr.timestamp);
|
||||
this.nodefs.utimesSync(path, date, date);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
// Ignore not supported errors. Emscripten does utimesSync when it
|
||||
// writes files, but never really requires the value to be set.
|
||||
if (e.code !== "ENOTSUP") {
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
if (attr.size !== undefined) {
|
||||
try {
|
||||
this.nodefs.truncateSync(path, attr.size);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.lookup = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
var mode = this.fs.getMode(path);
|
||||
return this.fs.createNode(parent, name, mode);
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.mknod = function (parent, name, mode, dev) {
|
||||
var node = this.fs.createNode(parent, name, mode, dev);
|
||||
// create the backing node for this in the fs root as well
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
if (this.FS.isDir(node.mode)) {
|
||||
this.nodefs.mkdirSync(path, node.mode);
|
||||
}
|
||||
else {
|
||||
this.nodefs.writeFileSync(path, '', { mode: node.mode });
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return node;
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.rename = function (oldNode, newDir, newName) {
|
||||
var oldPath = this.fs.realPath(oldNode);
|
||||
var newPath = this.PATH.join2(this.fs.realPath(newDir), newName);
|
||||
try {
|
||||
this.nodefs.renameSync(oldPath, newPath);
|
||||
// This logic is missing from the original NodeFS,
|
||||
// causing Emscripten's filesystem to think that the old file still exists.
|
||||
oldNode.name = newName;
|
||||
oldNode.parent = newDir;
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.unlink = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
try {
|
||||
this.nodefs.unlinkSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.rmdir = function (parent, name) {
|
||||
var path = this.PATH.join2(this.fs.realPath(parent), name);
|
||||
try {
|
||||
this.nodefs.rmdirSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.readdir = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
// Node does not list . and .. in directory listings,
|
||||
// but Emscripten expects it.
|
||||
var contents = this.nodefs.readdirSync(path);
|
||||
contents.push('.', '..');
|
||||
return contents;
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.symlink = function (parent, newName, oldPath) {
|
||||
var newPath = this.PATH.join2(this.fs.realPath(parent), newName);
|
||||
try {
|
||||
this.nodefs.symlinkSync(oldPath, newPath);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
BFSEmscriptenNodeOps.prototype.readlink = function (node) {
|
||||
var path = this.fs.realPath(node);
|
||||
try {
|
||||
return this.nodefs.readlinkSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
};
|
||||
return BFSEmscriptenNodeOps;
|
||||
}());
|
||||
var BFSEmscriptenFS = /** @class */ (function () {
|
||||
function BFSEmscriptenFS(_FS, _PATH, _ERRNO_CODES, nodefs) {
|
||||
if (_FS === void 0) { _FS = self['FS']; }
|
||||
if (_PATH === void 0) { _PATH = self['PATH']; }
|
||||
if (_ERRNO_CODES === void 0) { _ERRNO_CODES = self['ERRNO_CODES']; }
|
||||
if (nodefs === void 0) { nodefs = node_fs_1.default; }
|
||||
// This maps the integer permission modes from http://linux.die.net/man/3/open
|
||||
// to node.js-specific file open permission strings at http://nodejs.org/api/fs.html#fs_fs_open_path_flags_mode_callback
|
||||
this.flagsToPermissionStringMap = {
|
||||
0 /*O_RDONLY*/: 'r',
|
||||
1 /*O_WRONLY*/: 'r+',
|
||||
2 /*O_RDWR*/: 'r+',
|
||||
64 /*O_CREAT*/: 'r',
|
||||
65 /*O_WRONLY|O_CREAT*/: 'r+',
|
||||
66 /*O_RDWR|O_CREAT*/: 'r+',
|
||||
129 /*O_WRONLY|O_EXCL*/: 'rx+',
|
||||
193 /*O_WRONLY|O_CREAT|O_EXCL*/: 'rx+',
|
||||
514 /*O_RDWR|O_TRUNC*/: 'w+',
|
||||
577 /*O_WRONLY|O_CREAT|O_TRUNC*/: 'w',
|
||||
578 /*O_CREAT|O_RDWR|O_TRUNC*/: 'w+',
|
||||
705 /*O_WRONLY|O_CREAT|O_EXCL|O_TRUNC*/: 'wx',
|
||||
706 /*O_RDWR|O_CREAT|O_EXCL|O_TRUNC*/: 'wx+',
|
||||
1024 /*O_APPEND*/: 'a',
|
||||
1025 /*O_WRONLY|O_APPEND*/: 'a',
|
||||
1026 /*O_RDWR|O_APPEND*/: 'a+',
|
||||
1089 /*O_WRONLY|O_CREAT|O_APPEND*/: 'a',
|
||||
1090 /*O_RDWR|O_CREAT|O_APPEND*/: 'a+',
|
||||
1153 /*O_WRONLY|O_EXCL|O_APPEND*/: 'ax',
|
||||
1154 /*O_RDWR|O_EXCL|O_APPEND*/: 'ax+',
|
||||
1217 /*O_WRONLY|O_CREAT|O_EXCL|O_APPEND*/: 'ax',
|
||||
1218 /*O_RDWR|O_CREAT|O_EXCL|O_APPEND*/: 'ax+',
|
||||
4096 /*O_RDONLY|O_DSYNC*/: 'rs',
|
||||
4098 /*O_RDWR|O_DSYNC*/: 'rs+'
|
||||
};
|
||||
this.nodefs = nodefs;
|
||||
this.FS = _FS;
|
||||
this.PATH = _PATH;
|
||||
this.ERRNO_CODES = _ERRNO_CODES;
|
||||
this.node_ops = new BFSEmscriptenNodeOps(this);
|
||||
this.stream_ops = new BFSEmscriptenStreamOps(this);
|
||||
}
|
||||
BFSEmscriptenFS.prototype.mount = function (m) {
|
||||
return this.createNode(null, '/', this.getMode(m.opts.root), 0);
|
||||
};
|
||||
BFSEmscriptenFS.prototype.createNode = function (parent, name, mode, dev) {
|
||||
var FS = this.FS;
|
||||
if (!FS.isDir(mode) && !FS.isFile(mode) && !FS.isLink(mode)) {
|
||||
throw new FS.ErrnoError(this.ERRNO_CODES.EINVAL);
|
||||
}
|
||||
var node = FS.createNode(parent, name, mode);
|
||||
node.node_ops = this.node_ops;
|
||||
node.stream_ops = this.stream_ops;
|
||||
return node;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getMode = function (path) {
|
||||
var stat;
|
||||
try {
|
||||
stat = this.nodefs.lstatSync(path);
|
||||
}
|
||||
catch (e) {
|
||||
if (!e.code) {
|
||||
throw e;
|
||||
}
|
||||
throw new this.FS.ErrnoError(this.ERRNO_CODES[e.code]);
|
||||
}
|
||||
return stat.mode;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.realPath = function (node) {
|
||||
var parts = [];
|
||||
while (node.parent !== node) {
|
||||
parts.push(node.name);
|
||||
node = node.parent;
|
||||
}
|
||||
parts.push(node.mount.opts.root);
|
||||
parts.reverse();
|
||||
return this.PATH.join.apply(null, parts);
|
||||
};
|
||||
BFSEmscriptenFS.prototype.flagsToPermissionString = function (flags) {
|
||||
var parsedFlags = (typeof flags === "string") ? parseInt(flags, 10) : flags;
|
||||
parsedFlags &= 0x1FFF;
|
||||
if (parsedFlags in this.flagsToPermissionStringMap) {
|
||||
return this.flagsToPermissionStringMap[parsedFlags];
|
||||
}
|
||||
else {
|
||||
return flags;
|
||||
}
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getNodeFS = function () {
|
||||
return this.nodefs;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getFS = function () {
|
||||
return this.FS;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getPATH = function () {
|
||||
return this.PATH;
|
||||
};
|
||||
BFSEmscriptenFS.prototype.getERRNO_CODES = function () {
|
||||
return this.ERRNO_CODES;
|
||||
};
|
||||
return BFSEmscriptenFS;
|
||||
}());
|
||||
exports.default = BFSEmscriptenFS;
|
||||
//# sourceMappingURL=emscripten_fs.js.map
|
||||
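The usual wiring, mirroring the upstream BrowserFS documentation: construct the adapter, then hand it to Emscripten's `FS.mount`, which drives the `mount()`/`createNode()` methods above. A sketch, assuming it runs after both BrowserFS and the Emscripten runtime objects (`FS`, `PATH`, `ERRNO_CODES`) are available:

```javascript
var bfs = new BrowserFS.EmscriptenFS(FS, PATH, ERRNO_CODES);

FS.mkdir('/data');                      // Emscripten-side mount point
FS.mount(bfs, { root: '/' }, '/data');  // root of the BrowserFS hierarchy

// Emscripten code can now read and write under /data/..., and the calls are
// forwarded to BrowserFS through the node_ops/stream_ops defined above.
```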
14
sandpack-generated/static/browserfs11/node/generic/extended_ascii.d.ts
vendored
Normal file
14
sandpack-generated/static/browserfs11/node/generic/extended_ascii.d.ts
vendored
Normal file
@@ -0,0 +1,14 @@
/// <reference types="node" />
/**
* (Nonstandard) String utility function for 8-bit ASCII with the extended
* character set. Unlike the ASCII above, we do not mask the high bits.
*
* Placed into a separate file so it can be used with other Buffer implementations.
* @see http://en.wikipedia.org/wiki/Extended_ASCII
*/
export default class ExtendedASCII {
private static extendedChars;
static str2byte(str: string, buf: Buffer): number;
static byte2str(buff: Buffer): string;
static byteLength(str: string): number;
}
@@ -0,0 +1,63 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* (Nonstandard) String utility function for 8-bit ASCII with the extended
|
||||
* character set. Unlike the ASCII above, we do not mask the high bits.
|
||||
*
|
||||
* Placed into a separate file so it can be used with other Buffer implementations.
|
||||
* @see http://en.wikipedia.org/wiki/Extended_ASCII
|
||||
*/
|
||||
var ExtendedASCII = /** @class */ (function () {
|
||||
function ExtendedASCII() {
|
||||
}
|
||||
ExtendedASCII.str2byte = function (str, buf) {
|
||||
var length = str.length > buf.length ? buf.length : str.length;
|
||||
for (var i = 0; i < length; i++) {
|
||||
var charCode = str.charCodeAt(i);
|
||||
if (charCode > 0x7F) {
|
||||
// Check if extended ASCII.
|
||||
var charIdx = ExtendedASCII.extendedChars.indexOf(str.charAt(i));
|
||||
if (charIdx > -1) {
|
||||
charCode = charIdx + 0x80;
|
||||
}
|
||||
// Otherwise, keep it as-is.
|
||||
}
|
||||
buf[i] = charCode;
|
||||
}
|
||||
return length;
|
||||
};
|
||||
ExtendedASCII.byte2str = function (buff) {
|
||||
var chars = new Array(buff.length);
|
||||
for (var i = 0; i < buff.length; i++) {
|
||||
var charCode = buff[i];
|
||||
if (charCode > 0x7F) {
|
||||
chars[i] = ExtendedASCII.extendedChars[charCode - 128];
|
||||
}
|
||||
else {
|
||||
chars[i] = String.fromCharCode(charCode);
|
||||
}
|
||||
}
|
||||
return chars.join('');
|
||||
};
|
||||
ExtendedASCII.byteLength = function (str) { return str.length; };
|
||||
ExtendedASCII.extendedChars = ['\u00C7', '\u00FC', '\u00E9', '\u00E2', '\u00E4',
|
||||
'\u00E0', '\u00E5', '\u00E7', '\u00EA', '\u00EB', '\u00E8', '\u00EF',
|
||||
'\u00EE', '\u00EC', '\u00C4', '\u00C5', '\u00C9', '\u00E6', '\u00C6',
|
||||
'\u00F4', '\u00F6', '\u00F2', '\u00FB', '\u00F9', '\u00FF', '\u00D6',
|
||||
'\u00DC', '\u00F8', '\u00A3', '\u00D8', '\u00D7', '\u0192', '\u00E1',
|
||||
'\u00ED', '\u00F3', '\u00FA', '\u00F1', '\u00D1', '\u00AA', '\u00BA',
|
||||
'\u00BF', '\u00AE', '\u00AC', '\u00BD', '\u00BC', '\u00A1', '\u00AB',
|
||||
'\u00BB', '_', '_', '_', '\u00A6', '\u00A6', '\u00C1', '\u00C2', '\u00C0',
|
||||
'\u00A9', '\u00A6', '\u00A6', '+', '+', '\u00A2', '\u00A5', '+', '+', '-',
|
||||
'-', '+', '-', '+', '\u00E3', '\u00C3', '+', '+', '-', '-', '\u00A6', '-',
|
||||
'+', '\u00A4', '\u00F0', '\u00D0', '\u00CA', '\u00CB', '\u00C8', 'i',
|
||||
'\u00CD', '\u00CE', '\u00CF', '+', '+', '_', '_', '\u00A6', '\u00CC', '_',
|
||||
'\u00D3', '\u00DF', '\u00D4', '\u00D2', '\u00F5', '\u00D5', '\u00B5',
|
||||
'\u00FE', '\u00DE', '\u00DA', '\u00DB', '\u00D9', '\u00FD', '\u00DD',
|
||||
'\u00AF', '\u00B4', '\u00AD', '\u00B1', '_', '\u00BE', '\u00B6', '\u00A7',
|
||||
'\u00F7', '\u00B8', '\u00B0', '\u00A8', '\u00B7', '\u00B9', '\u00B3',
|
||||
'\u00B2', '_', ' '];
|
||||
return ExtendedASCII;
|
||||
}());
|
||||
exports.default = ExtendedASCII;
|
||||
//# sourceMappingURL=extended_ascii.js.map
|
||||
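A small round-trip sketch for the codec above (it assumes `str2byte` stores each character's code at index `i`, as in the corrected line earlier): characters above 0x7F are looked up in the `extendedChars` table and written as `0x80 + index`, and `byte2str` maps them back.

```javascript
var ExtendedASCII = require('./extended_ascii').default;

var buf = Buffer.alloc(4);
var written = ExtendedASCII.str2byte('Caf\u00E9', buf);  // 'Café'
console.log(written);                                    // 4
console.log(ExtendedASCII.byte2str(buf));                // 'Café' (é = extendedChars[2] -> 0x82)
console.log(ExtendedASCII.byteLength('Caf\u00E9'));      // 4, one byte per character
```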
21
sandpack-generated/static/browserfs11/node/generic/fetch.d.ts
vendored
Normal file
21
sandpack-generated/static/browserfs11/node/generic/fetch.d.ts
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
/**
|
||||
* Contains utility methods using 'fetch'.
|
||||
*/
|
||||
/// <reference types="node" />
|
||||
import { BFSCallback } from '../core/file_system';
|
||||
export declare const fetchIsAvailable: boolean;
|
||||
/**
|
||||
* Asynchronously download a file as a buffer or a JSON object.
|
||||
* Note that the third function signature with a non-specialized type is
|
||||
* invalid, but TypeScript requires it when you specialize string arguments to
|
||||
* constants.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function fetchFileAsync(p: string, type: 'buffer', cb: BFSCallback<Buffer>): void;
|
||||
export declare function fetchFileAsync(p: string, type: 'json', cb: BFSCallback<any>): void;
|
||||
export declare function fetchFileAsync(p: string, type: string, cb: BFSCallback<any>): void;
|
||||
/**
|
||||
* Asynchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function fetchFileSizeAsync(p: string, cb: BFSCallback<number>): void;
|
||||
60
sandpack-generated/static/browserfs11/node/generic/fetch.js
Normal file
60
sandpack-generated/static/browserfs11/node/generic/fetch.js
Normal file
@@ -0,0 +1,60 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Contains utility methods using 'fetch'.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.fetchFileSizeAsync = exports.fetchFileAsync = exports.fetchIsAvailable = void 0;
|
||||
var api_error_1 = require("../core/api_error");
|
||||
exports.fetchIsAvailable = (typeof (fetch) !== "undefined" && fetch !== null);
|
||||
function fetchFileAsync(p, type, cb) {
|
||||
var request;
|
||||
try {
|
||||
request = fetch(p);
|
||||
}
|
||||
catch (e) {
|
||||
// XXX: fetch will throw a TypeError if the URL has credentials in it
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, e.message));
|
||||
}
|
||||
request
|
||||
.then(function (res) {
|
||||
if (!res.ok) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "fetch error: response returned code ".concat(res.status)));
|
||||
}
|
||||
else {
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
res.arrayBuffer()
|
||||
.then(function (buf) { return cb(null, Buffer.from(buf)); })
|
||||
.catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
|
||||
break;
|
||||
case 'json':
|
||||
res.json()
|
||||
.then(function (json) { return cb(null, json); })
|
||||
.catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
|
||||
break;
|
||||
default:
|
||||
cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type));
|
||||
}
|
||||
}
|
||||
})
|
||||
.catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
|
||||
}
|
||||
exports.fetchFileAsync = fetchFileAsync;
|
||||
/**
|
||||
* Asynchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
function fetchFileSizeAsync(p, cb) {
|
||||
fetch(p, { method: 'HEAD' })
|
||||
.then(function (res) {
|
||||
if (!res.ok) {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "fetch HEAD error: response returned code ".concat(res.status)));
|
||||
}
|
||||
else {
|
||||
return cb(null, parseInt(res.headers.get('Content-Length') || '-1', 10));
|
||||
}
|
||||
})
|
||||
.catch(function (err) { return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, err.message)); });
|
||||
}
|
||||
exports.fetchFileSizeAsync = fetchFileSizeAsync;
|
||||
//# sourceMappingURL=fetch.js.map
|
||||
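A hedged usage sketch for the two helpers above; the URL is a placeholder, and the callbacks follow the usual `(err, result)` shape.

```javascript
var fetchUtil = require('./fetch');

fetchUtil.fetchFileSizeAsync('https://example.com/data.bin', function (e, size) {
  if (e) { return console.error(e.message); }
  console.log('Content-Length:', size);   // -1 if the header is missing

  fetchUtil.fetchFileAsync('https://example.com/data.bin', 'buffer', function (e2, buf) {
    if (e2) { return console.error(e2.message); }
    console.log('downloaded', buf.length, 'bytes');
  });
});
```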
150
sandpack-generated/static/browserfs11/node/generic/file_index.d.ts
vendored
Normal file
150
sandpack-generated/static/browserfs11/node/generic/file_index.d.ts
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { UNPKGMeta } from '../backend/UNPKGRequest';
|
||||
import { JSDelivrMeta } from '../backend/JSDelivrRequest';
|
||||
/**
|
||||
* A simple class for storing a filesystem index. Assumes that all paths passed
|
||||
* to it are *absolute* paths.
|
||||
*
|
||||
* Can be used as a partial or a full index, although care must be taken if used
|
||||
* for the former purpose, especially when directories are concerned.
|
||||
*/
|
||||
export declare class FileIndex<T> {
|
||||
/**
|
||||
* Static method for constructing indices from a JSON listing.
|
||||
* @param listing Directory listing generated by tools/XHRIndexer.coffee
|
||||
* @return A new FileIndex object.
|
||||
*/
|
||||
static fromListing<T>(listing: any): FileIndex<T>;
|
||||
static fromUnpkg<T>(listing: UNPKGMeta): FileIndex<T>;
|
||||
static fromJSDelivr<T>(listing: JSDelivrMeta): FileIndex<T>;
|
||||
private _index;
|
||||
/**
|
||||
* Constructs a new FileIndex.
|
||||
*/
|
||||
constructor();
|
||||
/**
|
||||
* Runs the given function over all files in the index.
|
||||
*/
|
||||
fileIterator<T>(cb: (file: T | null, path?: string) => void): void;
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
addPath(path: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* The path is added without special treatment (no joining of adjacent separators, etc).
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
addPathFast(path: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Removes the given path. Can be a file or a directory.
|
||||
* @return The removed item,
|
||||
* or null if it did not exist.
|
||||
*/
|
||||
removePath(path: string): Inode | null;
|
||||
/**
|
||||
* Retrieves the directory listing of the given path.
|
||||
* @return An array of files in the given path, or 'null' if it does not exist.
|
||||
*/
|
||||
ls(path: string): string[] | null;
|
||||
/**
|
||||
* Returns the inode of the given item.
|
||||
* @return Returns null if the item does not exist.
|
||||
*/
|
||||
getInode(path: string): Inode | null;
|
||||
/**
|
||||
* Split into a (directory path, item name) pair
|
||||
*/
|
||||
private _split_path;
|
||||
}
|
||||
/**
|
||||
* Generic interface for file/directory inodes.
|
||||
* Note that Stats objects are what we use for file inodes.
|
||||
*/
|
||||
export interface Inode {
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
}
|
||||
/**
|
||||
* Inode for a file. Stores an arbitrary (filesystem-specific) data payload.
|
||||
*/
|
||||
export declare class FileInode<T> implements Inode {
|
||||
private data;
|
||||
constructor(data: T);
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
getData(): T;
|
||||
setData(data: T): void;
|
||||
}
|
||||
/**
|
||||
* Inode for a directory. Currently only contains the directory listing.
|
||||
*/
|
||||
export declare class DirInode<T> implements Inode {
|
||||
private data;
|
||||
private _ls;
|
||||
/**
|
||||
* Constructs an inode for a directory.
|
||||
*/
|
||||
constructor(data?: T | null);
|
||||
isFile(): boolean;
|
||||
isDir(): boolean;
|
||||
getData(): T | null;
|
||||
/**
|
||||
* Return a Stats object for this inode.
|
||||
* @todo Should probably remove this at some point. This isn't the
|
||||
* responsibility of the FileIndex.
|
||||
*/
|
||||
getStats(): Stats;
|
||||
/**
|
||||
* Returns the directory listing for this directory. Paths in the directory are
|
||||
* relative to the directory's path.
|
||||
* @return The directory listing for this directory.
|
||||
*/
|
||||
getListing(): string[];
|
||||
/**
|
||||
* Returns the inode for the indicated item, or null if it does not exist.
|
||||
* @param p Name of item in this directory.
|
||||
*/
|
||||
getItem(p: string): Inode | null;
|
||||
/**
|
||||
* Add the given item to the directory listing. Note that the given inode is
|
||||
* not copied, and will be mutated by the DirInode if it is a DirInode.
|
||||
* @param p Item name to add to the directory listing.
|
||||
* @param inode The inode for the
|
||||
* item to add to the directory inode.
|
||||
* @return True if it was added, false if it already existed.
|
||||
*/
|
||||
addItem(p: string, inode: Inode): boolean;
|
||||
/**
|
||||
* Removes the given item from the directory listing.
|
||||
* @param p Name of item to remove from the directory listing.
|
||||
* @return Returns the item
|
||||
* removed, or null if the item did not exist.
|
||||
*/
|
||||
remItem(p: string): Inode | null;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare function isFileInode<T>(inode: Inode | null): inode is FileInode<T>;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export declare function isDirInode<T>(inode: Inode | null): inode is DirInode<T>;
|
||||
364
sandpack-generated/static/browserfs11/node/generic/file_index.js
Normal file
364
sandpack-generated/static/browserfs11/node/generic/file_index.js
Normal file
@@ -0,0 +1,364 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isDirInode = exports.isFileInode = exports.DirInode = exports.FileInode = exports.FileIndex = void 0;
|
||||
var path = require("path");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
/**
|
||||
* A simple class for storing a filesystem index. Assumes that all paths passed
|
||||
* to it are *absolute* paths.
|
||||
*
|
||||
* Can be used as a partial or a full index, although care must be taken if used
|
||||
* for the former purpose, especially when directories are concerned.
|
||||
*/
|
||||
var FileIndex = /** @class */ (function () {
|
||||
/**
|
||||
* Constructs a new FileIndex.
|
||||
*/
|
||||
function FileIndex() {
|
||||
// _index is a single-level key,value store that maps *directory* paths to
|
||||
// DirInodes. File information is only contained in DirInodes themselves.
|
||||
this._index = {};
|
||||
// Create the root directory.
|
||||
this.addPath('/', new DirInode());
|
||||
}
|
||||
/**
|
||||
* Static method for constructing indices from a JSON listing.
|
||||
* @param listing Directory listing generated by tools/XHRIndexer.coffee
|
||||
* @return A new FileIndex object.
|
||||
*/
|
||||
FileIndex.fromListing = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
// Add a root DirNode.
|
||||
var rootInode = new DirInode();
|
||||
idx._index['/'] = rootInode;
|
||||
var queue = [['', listing, rootInode]];
|
||||
while (queue.length > 0) {
|
||||
var inode = void 0;
|
||||
var next = queue.pop();
|
||||
var pwd = next[0];
|
||||
var tree = next[1];
|
||||
var parent_1 = next[2];
|
||||
for (var node in tree) {
|
||||
if (tree.hasOwnProperty(node)) {
|
||||
var children = tree[node];
|
||||
var name_1 = "".concat(pwd, "/").concat(node);
|
||||
if (children) {
|
||||
idx._index[name_1] = inode = new DirInode();
|
||||
queue.push([name_1, children, inode]);
|
||||
}
|
||||
else {
|
||||
// This inode doesn't have correct size information, noted with -1.
|
||||
inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, -1, 0x16D));
|
||||
}
|
||||
if (parent_1) {
|
||||
parent_1._ls[node] = inode;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return idx;
|
||||
};
|
||||
FileIndex.fromUnpkg = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
function handleDir(dirPath, entry) {
|
||||
var dirInode = new DirInode();
|
||||
entry.files.forEach(function (child) {
|
||||
var inode;
|
||||
if (child.type === 'file') {
|
||||
inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, child.size));
|
||||
// @ts-ignore
|
||||
dirInode._ls[path.basename(child.path)] = inode;
|
||||
}
|
||||
else {
|
||||
idx._index[child.path] = inode = handleDir(child.path, child);
|
||||
}
|
||||
});
|
||||
return dirInode;
|
||||
}
|
||||
idx._index['/'] = handleDir('/', listing);
|
||||
return idx;
|
||||
};
|
||||
FileIndex.fromJSDelivr = function (listing) {
|
||||
var idx = new FileIndex();
|
||||
listing.files.forEach(function (file) {
|
||||
var inode = new FileInode(new node_fs_stats_1.default(node_fs_stats_1.FileType.FILE, file.size));
|
||||
idx.addPathFast(file.name, inode);
|
||||
});
|
||||
return idx;
|
||||
};
|
||||
/**
|
||||
* Runs the given function over all files in the index.
|
||||
*/
|
||||
FileIndex.prototype.fileIterator = function (cb) {
|
||||
for (var path_1 in this._index) {
|
||||
if (this._index.hasOwnProperty(path_1)) {
|
||||
var dir = this._index[path_1];
|
||||
var files = dir.getListing();
|
||||
for (var _i = 0, files_1 = files; _i < files_1.length; _i++) {
|
||||
var file = files_1[_i];
|
||||
var item = dir.getItem(file);
|
||||
if (isFileInode(item)) {
|
||||
cb(item.getData(), path_1 + '/' + file);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
FileIndex.prototype.addPath = function (path, inode) {
|
||||
if (!inode) {
|
||||
throw new Error('Inode must be specified');
|
||||
}
|
||||
if (path[0] !== '/') {
|
||||
throw new Error('Path must be absolute, got: ' + path);
|
||||
}
|
||||
// Check if it already exists.
|
||||
if (this._index.hasOwnProperty(path)) {
|
||||
return this._index[path] === inode;
|
||||
}
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Try to add to its parent directory first.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined && path !== '/') {
|
||||
// Create parent.
|
||||
parent = new DirInode();
|
||||
if (!this.addPath(dirpath, parent)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// Add myself to my parent.
|
||||
if (path !== '/') {
|
||||
if (!parent.addItem(itemname, inode)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// If I'm a directory, add myself to the index.
|
||||
if (isDirInode(inode)) {
|
||||
this._index[path] = inode;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Adds the given absolute path to the index if it is not already in the index.
|
||||
* The path is added without special treatment (no joining of adjacent separators, etc).
|
||||
* Creates any needed parent directories.
|
||||
* @param path The path to add to the index.
|
||||
* @param inode The inode for the
|
||||
* path to add.
|
||||
* @return 'True' if it was added or already exists, 'false' if there
|
||||
* was an issue adding it (e.g. item in path is a file, item exists but is
|
||||
* different).
|
||||
* @todo If adding fails and implicitly creates directories, we do not clean up
|
||||
* the new empty directories.
|
||||
*/
|
||||
FileIndex.prototype.addPathFast = function (path, inode) {
|
||||
var itemNameMark = path.lastIndexOf('/');
|
||||
var parentPath = itemNameMark === 0 ? "/" : path.substring(0, itemNameMark);
|
||||
var itemName = path.substring(itemNameMark + 1);
|
||||
// Try to add to its parent directory first.
|
||||
var parent = this._index[parentPath];
|
||||
if (parent === undefined) {
|
||||
// Create parent.
|
||||
parent = new DirInode();
|
||||
this.addPathFast(parentPath, parent);
|
||||
}
|
||||
if (!parent.addItem(itemName, inode)) {
|
||||
return false;
|
||||
}
|
||||
// If adding a directory, add to the index as well.
|
||||
if (inode.isDir()) {
|
||||
this._index[path] = inode;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Removes the given path. Can be a file or a directory.
|
||||
* @return The removed item,
|
||||
* or null if it did not exist.
|
||||
*/
|
||||
FileIndex.prototype.removePath = function (path) {
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Try to remove it from its parent directory first.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined) {
|
||||
return null;
|
||||
}
|
||||
// Remove myself from my parent.
|
||||
var inode = parent.remItem(itemname);
|
||||
if (inode === null) {
|
||||
return null;
|
||||
}
|
||||
// If I'm a directory, remove myself from the index, and remove my children.
|
||||
if (isDirInode(inode)) {
|
||||
var children = inode.getListing();
|
||||
for (var _i = 0, children_1 = children; _i < children_1.length; _i++) {
|
||||
var child = children_1[_i];
|
||||
this.removePath(path + '/' + child);
|
||||
}
|
||||
// Remove the directory from the index, unless it's the root.
|
||||
if (path !== '/') {
|
||||
delete this._index[path];
|
||||
}
|
||||
}
|
||||
return inode;
|
||||
};
|
||||
/**
|
||||
* Retrieves the directory listing of the given path.
|
||||
* @return An array of files in the given path, or 'null' if it does not exist.
|
||||
*/
|
||||
FileIndex.prototype.ls = function (path) {
|
||||
var item = this._index[path];
|
||||
if (item === undefined) {
|
||||
return null;
|
||||
}
|
||||
return item.getListing();
|
||||
};
|
||||
/**
|
||||
* Returns the inode of the given item.
|
||||
* @return Returns null if the item does not exist.
|
||||
*/
|
||||
FileIndex.prototype.getInode = function (path) {
|
||||
var splitPath = this._split_path(path);
|
||||
var dirpath = splitPath[0];
|
||||
var itemname = splitPath[1];
|
||||
// Retrieve from its parent directory.
|
||||
var parent = this._index[dirpath];
|
||||
if (parent === undefined) {
|
||||
return null;
|
||||
}
|
||||
// Root case
|
||||
if (dirpath === path) {
|
||||
return parent;
|
||||
}
|
||||
return parent.getItem(itemname);
|
||||
};
|
||||
/**
|
||||
* Split into a (directory path, item name) pair
|
||||
*/
|
||||
FileIndex.prototype._split_path = function (p) {
|
||||
var dirpath = path.dirname(p);
|
||||
var itemname = p.substr(dirpath.length + (dirpath === "/" ? 0 : 1));
|
||||
return [dirpath, itemname];
|
||||
};
|
||||
return FileIndex;
|
||||
}());
|
||||
exports.FileIndex = FileIndex;
|
||||
/**
|
||||
* Inode for a file. Stores an arbitrary (filesystem-specific) data payload.
|
||||
*/
|
||||
var FileInode = /** @class */ (function () {
|
||||
function FileInode(data) {
|
||||
this.data = data;
|
||||
}
|
||||
FileInode.prototype.isFile = function () { return true; };
|
||||
FileInode.prototype.isDir = function () { return false; };
|
||||
FileInode.prototype.getData = function () { return this.data; };
|
||||
FileInode.prototype.setData = function (data) { this.data = data; };
|
||||
return FileInode;
|
||||
}());
|
||||
exports.FileInode = FileInode;
|
||||
/**
|
||||
* Inode for a directory. Currently only contains the directory listing.
|
||||
*/
|
||||
var DirInode = /** @class */ (function () {
|
||||
/**
|
||||
* Constructs an inode for a directory.
|
||||
*/
|
||||
function DirInode(data) {
|
||||
if (data === void 0) { data = null; }
|
||||
this.data = data;
|
||||
this._ls = {};
|
||||
}
|
||||
DirInode.prototype.isFile = function () {
|
||||
return false;
|
||||
};
|
||||
DirInode.prototype.isDir = function () {
|
||||
return true;
|
||||
};
|
||||
DirInode.prototype.getData = function () { return this.data; };
|
||||
/**
|
||||
* Return a Stats object for this inode.
|
||||
* @todo Should probably remove this at some point. This isn't the
|
||||
* responsibility of the FileIndex.
|
||||
*/
|
||||
DirInode.prototype.getStats = function () {
|
||||
return new node_fs_stats_1.default(node_fs_stats_1.FileType.DIRECTORY, 4096, 0x16D);
|
||||
};
|
||||
/**
|
||||
* Returns the directory listing for this directory. Paths in the directory are
|
||||
* relative to the directory's path.
|
||||
* @return The directory listing for this directory.
|
||||
*/
|
||||
DirInode.prototype.getListing = function () {
|
||||
return Object.keys(this._ls);
|
||||
};
|
||||
/**
|
||||
* Returns the inode for the indicated item, or null if it does not exist.
|
||||
* @param p Name of item in this directory.
|
||||
*/
|
||||
DirInode.prototype.getItem = function (p) {
|
||||
var item = this._ls[p];
|
||||
return item && this._ls.hasOwnProperty(p) ? item : null;
|
||||
};
|
||||
/**
|
||||
* Add the given item to the directory listing. Note that the given inode is
|
||||
* not copied, and will be mutated by the DirInode if it is a DirInode.
|
||||
* @param p Item name to add to the directory listing.
|
||||
* @param inode The inode for the
|
||||
* item to add to the directory inode.
|
||||
* @return True if it was added, false if it already existed.
|
||||
*/
|
||||
DirInode.prototype.addItem = function (p, inode) {
|
||||
if (p in this._ls) {
|
||||
return false;
|
||||
}
|
||||
this._ls[p] = inode;
|
||||
return true;
|
||||
};
|
||||
/**
|
||||
* Removes the given item from the directory listing.
|
||||
* @param p Name of item to remove from the directory listing.
|
||||
* @return Returns the item
|
||||
* removed, or null if the item did not exist.
|
||||
*/
|
||||
DirInode.prototype.remItem = function (p) {
|
||||
var item = this._ls[p];
|
||||
if (item === undefined) {
|
||||
return null;
|
||||
}
|
||||
delete this._ls[p];
|
||||
return item;
|
||||
};
|
||||
return DirInode;
|
||||
}());
|
||||
exports.DirInode = DirInode;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isFileInode(inode) {
|
||||
return !!inode && inode.isFile();
|
||||
}
|
||||
exports.isFileInode = isFileInode;
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function isDirInode(inode) {
|
||||
return !!inode && inode.isDir();
|
||||
}
|
||||
exports.isDirInode = isDirInode;
|
||||
//# sourceMappingURL=file_index.js.map
|
||||
49
sandpack-generated/static/browserfs11/node/generic/inode.d.ts
vendored
Normal file
49
sandpack-generated/static/browserfs11/node/generic/inode.d.ts
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
/// <reference types="node" />
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
/**
|
||||
* Generic inode definition that can easily be serialized.
|
||||
*/
|
||||
export default class Inode {
|
||||
id: string;
|
||||
size: number;
|
||||
mode: number;
|
||||
atime: number;
|
||||
mtime: number;
|
||||
ctime: number;
|
||||
/**
|
||||
* Converts the buffer into an Inode.
|
||||
*/
|
||||
static fromBuffer(buffer: Buffer): Inode;
|
||||
constructor(id: string, size: number, mode: number, atime: number, mtime: number, ctime: number);
|
||||
/**
|
||||
* Handy function that converts the Inode to a Node Stats object.
|
||||
*/
|
||||
toStats(): Stats;
|
||||
/**
|
||||
* Get the size of this Inode, in bytes.
|
||||
*/
|
||||
getSize(): number;
|
||||
/**
|
||||
* Writes the inode into the start of the buffer.
|
||||
*/
|
||||
toBuffer(buff?: Buffer): Buffer;
|
||||
/**
|
||||
* Updates the Inode using information from the stats object. Used by file
|
||||
* systems at sync time, e.g.:
|
||||
* - Program opens file and gets a File object.
|
||||
* - Program mutates file. File object is responsible for maintaining
|
||||
* metadata changes locally -- typically in a Stats object.
|
||||
* - Program closes file. File object's metadata changes are synced with the
|
||||
* file system.
|
||||
* @return True if any changes have occurred.
|
||||
*/
|
||||
update(stats: Stats): boolean;
|
||||
/**
|
||||
* @return [Boolean] True if this item is a file.
|
||||
*/
|
||||
isFile(): boolean;
|
||||
/**
|
||||
* @return [Boolean] True if this item is a directory.
|
||||
*/
|
||||
isDirectory(): boolean;
|
||||
}
|
||||
105
sandpack-generated/static/browserfs11/node/generic/inode.js
Normal file
105
sandpack-generated/static/browserfs11/node/generic/inode.js
Normal file
@@ -0,0 +1,105 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
/**
|
||||
* Generic inode definition that can easily be serialized.
|
||||
*/
|
||||
var Inode = /** @class */ (function () {
|
||||
function Inode(id, size, mode, atime, mtime, ctime) {
|
||||
this.id = id;
|
||||
this.size = size;
|
||||
this.mode = mode;
|
||||
this.atime = atime;
|
||||
this.mtime = mtime;
|
||||
this.ctime = ctime;
|
||||
}
|
||||
/**
|
||||
* Converts the buffer into an Inode.
|
||||
*/
|
||||
Inode.fromBuffer = function (buffer) {
|
||||
if (buffer === undefined) {
|
||||
throw new Error("NO");
|
||||
}
|
||||
return new Inode(buffer.toString('ascii', 30), buffer.readUInt32LE(0), buffer.readUInt16LE(4), buffer.readDoubleLE(6), buffer.readDoubleLE(14), buffer.readDoubleLE(22));
|
||||
};
|
||||
/**
|
||||
* Handy function that converts the Inode to a Node Stats object.
|
||||
*/
|
||||
Inode.prototype.toStats = function () {
|
||||
return new node_fs_stats_1.default((this.mode & 0xF000) === node_fs_stats_1.FileType.DIRECTORY ? node_fs_stats_1.FileType.DIRECTORY : node_fs_stats_1.FileType.FILE, this.size, this.mode, this.atime, this.mtime, this.ctime);
|
||||
};
|
||||
/**
|
||||
* Get the size of this Inode, in bytes.
|
||||
*/
|
||||
Inode.prototype.getSize = function () {
|
||||
// ASSUMPTION: ID is ASCII (1 byte per char).
|
||||
return 30 + this.id.length;
|
||||
};
|
||||
/**
|
||||
* Writes the inode into the start of the buffer.
|
||||
*/
|
||||
Inode.prototype.toBuffer = function (buff) {
|
||||
if (buff === void 0) { buff = Buffer.alloc(this.getSize()); }
|
||||
buff.writeUInt32LE(this.size, 0);
|
||||
buff.writeUInt16LE(this.mode, 4);
|
||||
buff.writeDoubleLE(this.atime, 6);
|
||||
buff.writeDoubleLE(this.mtime, 14);
|
||||
buff.writeDoubleLE(this.ctime, 22);
|
||||
buff.write(this.id, 30, this.id.length, 'ascii');
|
||||
return buff;
|
||||
};
|
||||
/**
|
||||
* Updates the Inode using information from the stats object. Used by file
|
||||
* systems at sync time, e.g.:
|
||||
* - Program opens file and gets a File object.
|
||||
* - Program mutates file. File object is responsible for maintaining
|
||||
* metadata changes locally -- typically in a Stats object.
|
||||
* - Program closes file. File object's metadata changes are synced with the
|
||||
* file system.
|
||||
* @return True if any changes have occurred.
|
||||
*/
|
||||
Inode.prototype.update = function (stats) {
|
||||
var hasChanged = false;
|
||||
if (this.size !== stats.size) {
|
||||
this.size = stats.size;
|
||||
hasChanged = true;
|
||||
}
|
||||
if (this.mode !== stats.mode) {
|
||||
this.mode = stats.mode;
|
||||
hasChanged = true;
|
||||
}
|
||||
var atimeMs = stats.atime.getTime();
|
||||
if (this.atime !== atimeMs) {
|
||||
this.atime = atimeMs;
|
||||
hasChanged = true;
|
||||
}
|
||||
var mtimeMs = stats.mtime.getTime();
|
||||
if (this.mtime !== mtimeMs) {
|
||||
this.mtime = mtimeMs;
|
||||
hasChanged = true;
|
||||
}
|
||||
var ctimeMs = stats.ctime.getTime();
|
||||
if (this.ctime !== ctimeMs) {
|
||||
this.ctime = ctimeMs;
|
||||
hasChanged = true;
|
||||
}
|
||||
return hasChanged;
|
||||
};
|
||||
// XXX: Copied from Stats. Should reconcile these two into something more
|
||||
// compact.
|
||||
/**
|
||||
* @return [Boolean] True if this item is a file.
|
||||
*/
|
||||
Inode.prototype.isFile = function () {
|
||||
return (this.mode & 0xF000) === node_fs_stats_1.FileType.FILE;
|
||||
};
|
||||
/**
|
||||
* @return [Boolean] True if this item is a directory.
|
||||
*/
|
||||
Inode.prototype.isDirectory = function () {
|
||||
return (this.mode & 0xF000) === node_fs_stats_1.FileType.DIRECTORY;
|
||||
};
|
||||
return Inode;
|
||||
}());
|
||||
exports.default = Inode;
|
||||
//# sourceMappingURL=inode.js.map
|
||||
367
sandpack-generated/static/browserfs11/node/generic/key_value_filesystem.d.ts
vendored
Normal file
367
sandpack-generated/static/browserfs11/node/generic/key_value_filesystem.d.ts
vendored
Normal file
@@ -0,0 +1,367 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFileSystem, SynchronousFileSystem, BFSOneArgCallback, BFSCallback } from '../core/file_system';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { File } from '../core/file';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import PreloadFile from '../generic/preload_file';
|
||||
/**
|
||||
* Represents a *synchronous* key-value store.
|
||||
*/
|
||||
export interface SyncKeyValueStore {
|
||||
/**
|
||||
* The name of the key-value store.
|
||||
*/
|
||||
name(): string;
|
||||
/**
|
||||
* Empties the key-value store completely.
|
||||
*/
|
||||
clear(): void;
|
||||
/**
|
||||
* Begins a new read-only transaction.
|
||||
*/
|
||||
beginTransaction(type: "readonly"): SyncKeyValueROTransaction;
|
||||
/**
|
||||
* Begins a new read-write transaction.
|
||||
*/
|
||||
beginTransaction(type: "readwrite"): SyncKeyValueRWTransaction;
|
||||
beginTransaction(type: string): SyncKeyValueROTransaction;
|
||||
}
|
||||
/**
|
||||
* A read-only transaction for a synchronous key value store.
|
||||
*/
|
||||
export interface SyncKeyValueROTransaction {
|
||||
/**
|
||||
* Retrieves the data at the given key. Throws an ApiError if an error occurs
|
||||
* or if the key does not exist.
|
||||
* @param key The key to look under for data.
|
||||
* @return The data stored under the key, or undefined if not present.
|
||||
*/
|
||||
get(key: string): Buffer | undefined;
|
||||
}
|
||||
/**
|
||||
* A read-write transaction for a synchronous key value store.
|
||||
*/
|
||||
export interface SyncKeyValueRWTransaction extends SyncKeyValueROTransaction {
|
||||
/**
|
||||
* Adds the data to the store under the given key.
|
||||
* @param key The key to add the data under.
|
||||
* @param data The data to add to the store.
|
||||
* @param overwrite If 'true', overwrite any existing data. If 'false',
|
||||
* avoids storing the data if the key exists.
|
||||
* @return True if storage succeeded, false otherwise.
|
||||
*/
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
/**
|
||||
* Deletes the data at the given key.
|
||||
* @param key The key to delete from the store.
|
||||
*/
|
||||
del(key: string): void;
|
||||
/**
|
||||
* Commits the transaction.
|
||||
*/
|
||||
commit(): void;
|
||||
/**
|
||||
* Aborts and rolls back the transaction.
|
||||
*/
|
||||
abort(): void;
|
||||
}
|
||||
/**
|
||||
* An interface for simple synchronous key-value stores that don't have special
|
||||
* support for transactions and such.
|
||||
*/
|
||||
export interface SimpleSyncStore {
|
||||
get(key: string): Buffer | undefined;
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
del(key: string): void;
|
||||
}
|
||||
/**
|
||||
* A simple RW transaction for simple synchronous key-value stores.
|
||||
*/
|
||||
export declare class SimpleSyncRWTransaction implements SyncKeyValueRWTransaction {
|
||||
private store;
|
||||
/**
|
||||
* Stores data in the keys we modify prior to modifying them.
|
||||
* Allows us to roll back commits.
|
||||
*/
|
||||
private originalData;
|
||||
/**
|
||||
* List of keys modified in this transaction, if any.
|
||||
*/
|
||||
private modifiedKeys;
|
||||
constructor(store: SimpleSyncStore);
|
||||
get(key: string): Buffer | undefined;
|
||||
put(key: string, data: Buffer, overwrite: boolean): boolean;
|
||||
del(key: string): void;
|
||||
commit(): void;
|
||||
abort(): void;
|
||||
/**
|
||||
* Stashes given key value pair into `originalData` if it doesn't already
|
||||
* exist. Allows us to stash values the program is requesting anyway to
|
||||
* prevent needless `get` requests if the program modifies the data later
|
||||
* on during the transaction.
|
||||
*/
|
||||
private stashOldValue;
|
||||
/**
|
||||
* Marks the given key as modified, and stashes its value if it has not been
|
||||
* stashed already.
|
||||
*/
|
||||
private markModified;
|
||||
}
|
||||
export interface SyncKeyValueFileSystemOptions {
|
||||
/**
|
||||
* The actual key-value store to read from/write to.
|
||||
*/
|
||||
store: SyncKeyValueStore;
|
||||
}
|
||||
export declare class SyncKeyValueFile extends PreloadFile<SyncKeyValueFileSystem> implements File {
|
||||
constructor(_fs: SyncKeyValueFileSystem, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
syncSync(): void;
|
||||
closeSync(): void;
|
||||
}
|
||||
/**
|
||||
* A "Synchronous key-value file system". Stores data to/retrieves data from an
|
||||
* underlying key-value store.
|
||||
*
|
||||
* We use a unique ID for each node in the file system. The root node has a
|
||||
* fixed ID.
|
||||
* @todo Introduce Node ID caching.
|
||||
* @todo Check modes.
|
||||
*/
|
||||
export declare class SyncKeyValueFileSystem extends SynchronousFileSystem {
|
||||
static isAvailable(): boolean;
|
||||
private store;
|
||||
constructor(options: SyncKeyValueFileSystemOptions);
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsSymlinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Delete all contents stored in the file system.
|
||||
*/
|
||||
empty(): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
statSync(p: string, isLstat: boolean): Stats;
|
||||
createFileSync(p: string, flag: FileFlag, mode: number): File;
|
||||
openFileSync(p: string, flag: FileFlag): File;
|
||||
unlinkSync(p: string): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdirSync(p: string): string[];
|
||||
_syncSync(p: string, data: Buffer, stats: Stats): void;
|
||||
/**
|
||||
* Checks if the root directory exists. Creates it if it doesn't.
|
||||
*/
|
||||
private makeRootDirectory;
|
||||
/**
|
||||
* Helper function for findINode.
|
||||
* @param parent The parent directory of the file we are attempting to find.
|
||||
* @param filename The filename of the inode we are attempting to find, minus
|
||||
* the parent.
|
||||
* @return string The ID of the file's inode in the file system.
|
||||
*/
|
||||
private _findINode;
|
||||
/**
|
||||
* Finds the Inode of the given path.
|
||||
* @param p The path to look up.
|
||||
* @return The Inode of the path p.
|
||||
* @todo memoize/cache
|
||||
*/
|
||||
private findINode;
|
||||
/**
|
||||
* Given the ID of a node, retrieves the corresponding Inode.
|
||||
* @param tx The transaction to use.
|
||||
* @param p The corresponding path to the file (used for error messages).
|
||||
* @param id The ID to look up.
|
||||
*/
|
||||
private getINode;
|
||||
/**
|
||||
* Given the Inode of a directory, retrieves the corresponding directory
|
||||
* listing.
|
||||
*/
|
||||
private getDirListing;
|
||||
/**
|
||||
* Creates a new node under a random ID. Retries 5 times before giving up in
|
||||
* the exceedingly unlikely chance that we try to reuse a random GUID.
|
||||
* @return The GUID that the data was stored under.
|
||||
*/
|
||||
private addNewNode;
|
||||
/**
|
||||
* Commits a new file (well, a FILE or a DIRECTORY) to the file system with
|
||||
* the given mode.
|
||||
* Note: This will commit the transaction.
|
||||
* @param p The path to the new file.
|
||||
* @param type The type of the new file.
|
||||
* @param mode The mode to create the new file with.
|
||||
* @param data The data to store at the file's data node.
|
||||
* @return The Inode for the new file.
|
||||
*/
|
||||
private commitNewFile;
|
||||
/**
|
||||
* Remove all traces of the given path from the file system.
|
||||
* @param p The path to remove from the file system.
|
||||
* @param isDir Does the path belong to a directory, or a file?
|
||||
* @todo Update mtime.
|
||||
*/
|
||||
private removeEntry;
|
||||
}
|
||||
/**
|
||||
* Represents an *asynchronous* key-value store.
|
||||
*/
|
||||
export interface AsyncKeyValueStore {
|
||||
/**
|
||||
* The name of the key-value store.
|
||||
*/
|
||||
name(): string;
|
||||
/**
|
||||
* Empties the key-value store completely.
|
||||
*/
|
||||
clear(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Begins a read-write transaction.
|
||||
*/
|
||||
beginTransaction(type: 'readwrite'): AsyncKeyValueRWTransaction;
|
||||
/**
|
||||
* Begins a read-only transaction.
|
||||
*/
|
||||
beginTransaction(type: 'readonly'): AsyncKeyValueROTransaction;
|
||||
beginTransaction(type: string): AsyncKeyValueROTransaction;
|
||||
}
|
||||
/**
|
||||
* Represents an asynchronous read-only transaction.
|
||||
*/
|
||||
export interface AsyncKeyValueROTransaction {
|
||||
/**
|
||||
* Retrieves the data at the given key.
|
||||
* @param key The key to look under for data.
|
||||
*/
|
||||
get(key: string, cb: BFSCallback<Buffer>): void;
|
||||
}
|
||||
/**
|
||||
* Represents an asynchronous read-write transaction.
|
||||
*/
|
||||
export interface AsyncKeyValueRWTransaction extends AsyncKeyValueROTransaction {
|
||||
/**
|
||||
* Adds the data to the store under the given key. Overwrites any existing
|
||||
* data.
|
||||
* @param key The key to add the data under.
|
||||
* @param data The data to add to the store.
|
||||
* @param overwrite If 'true', overwrite any existing data. If 'false',
|
||||
* avoids writing the data if the key exists.
|
||||
* @param cb Triggered with an error and whether or not the value was
|
||||
* committed.
|
||||
*/
|
||||
put(key: string, data: Buffer, overwrite: boolean, cb: BFSCallback<boolean>): void;
|
||||
/**
|
||||
* Deletes the data at the given key.
|
||||
* @param key The key to delete from the store.
|
||||
*/
|
||||
del(key: string, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Commits the transaction.
|
||||
*/
|
||||
commit(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Aborts and rolls back the transaction.
|
||||
*/
|
||||
abort(cb: BFSOneArgCallback): void;
|
||||
}
|
||||
export declare class AsyncKeyValueFile extends PreloadFile<AsyncKeyValueFileSystem> implements File {
|
||||
constructor(_fs: AsyncKeyValueFileSystem, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
}
|
||||
/**
|
||||
* An "Asynchronous key-value file system". Stores data to/retrieves data from
|
||||
* an underlying asynchronous key-value store.
|
||||
*/
|
||||
export declare class AsyncKeyValueFileSystem extends BaseFileSystem {
|
||||
static isAvailable(): boolean;
|
||||
protected store: AsyncKeyValueStore;
|
||||
private _cache;
|
||||
constructor(cacheSize: number);
|
||||
/**
|
||||
* Initializes the file system. Typically called by subclasses' async
|
||||
* constructors.
|
||||
*/
|
||||
init(store: AsyncKeyValueStore, cb: BFSOneArgCallback): void;
|
||||
getName(): string;
|
||||
isReadOnly(): boolean;
|
||||
supportsSymlinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
/**
|
||||
* Delete all contents stored in the file system.
|
||||
*/
|
||||
empty(cb: BFSOneArgCallback): void;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
createFile(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openFile(p: string, flag: FileFlag, cb: BFSCallback<File>): void;
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
_sync(p: string, data: Buffer, stats: Stats, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Checks if the root directory exists. Creates it if it doesn't.
|
||||
*/
|
||||
private makeRootDirectory;
|
||||
/**
|
||||
* Helper function for findINode.
|
||||
* @param parent The parent directory of the file we are attempting to find.
|
||||
* @param filename The filename of the inode we are attempting to find, minus
|
||||
* the parent.
|
||||
* @param cb Passed an error or the ID of the file's inode in the file system.
|
||||
*/
|
||||
private _findINode;
|
||||
/**
|
||||
* Finds the Inode of the given path.
|
||||
* @param p The path to look up.
|
||||
* @param cb Passed an error or the Inode of the path p.
|
||||
* @todo memoize/cache
|
||||
*/
|
||||
private findINode;
|
||||
/**
|
||||
* Given the ID of a node, retrieves the corresponding Inode.
|
||||
* @param tx The transaction to use.
|
||||
* @param p The corresponding path to the file (used for error messages).
|
||||
* @param id The ID to look up.
|
||||
* @param cb Passed an error or the inode under the given id.
|
||||
*/
|
||||
private getINode;
|
||||
/**
|
||||
* Given the Inode of a directory, retrieves the corresponding directory
|
||||
* listing.
|
||||
*/
|
||||
private getDirListing;
|
||||
/**
|
||||
* Given a path to a directory, retrieves the corresponding INode and
|
||||
* directory listing.
|
||||
*/
|
||||
private findINodeAndDirListing;
|
||||
/**
|
||||
* Adds a new node under a random ID. Retries 5 times before giving up in
|
||||
* the exceedingly unlikely chance that we try to reuse a random GUID.
|
||||
* @param cb Passed an error or the GUID that the data was stored under.
|
||||
*/
|
||||
private addNewNode;
|
||||
/**
|
||||
* Commits a new file (well, a FILE or a DIRECTORY) to the file system with
|
||||
* the given mode.
|
||||
* Note: This will commit the transaction.
|
||||
* @param p The path to the new file.
|
||||
* @param type The type of the new file.
|
||||
* @param mode The mode to create the new file with.
|
||||
* @param data The data to store at the file's data node.
|
||||
* @param cb Passed an error or the Inode for the new file.
|
||||
*/
|
||||
private commitNewFile;
|
||||
/**
|
||||
* Remove all traces of the given path from the file system.
|
||||
* @param p The path to remove from the file system.
|
||||
* @param isDir Does the path belong to a directory, or a file?
|
||||
* @todo Update mtime.
|
||||
*/
|
||||
private removeEntry;
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
68
sandpack-generated/static/browserfs11/node/generic/locked_fs.d.ts
vendored
Normal file
68
sandpack-generated/static/browserfs11/node/generic/locked_fs.d.ts
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
/// <reference types="node" />
|
||||
import { FileSystem, BFSOneArgCallback, BFSCallback } from '../core/file_system';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
import { default as Stats } from '../core/node_fs_stats';
|
||||
import { File } from '../core/file';
|
||||
/**
|
||||
* This class serializes access to an underlying async filesystem.
|
||||
* For example, on an OverlayFS instance with an async lower
|
||||
* directory operations like rename and rmdir may involve multiple
|
||||
* requests involving both the upper and lower filesystems -- they
|
||||
* are not executed in a single atomic step. OverlayFS uses this
|
||||
* LockedFS to avoid having to reason about the correctness of
|
||||
* multiple requests interleaving.
|
||||
*/
|
||||
export default class LockedFS<T extends FileSystem> implements FileSystem {
|
||||
private _fs;
|
||||
private _mu;
|
||||
constructor(fs: T);
|
||||
getName(): string;
|
||||
getFSUnlocked(): T;
|
||||
diskSpace(p: string, cb: (total: number, free: number) => any): void;
|
||||
isReadOnly(): boolean;
|
||||
supportsLinks(): boolean;
|
||||
supportsProps(): boolean;
|
||||
supportsSynch(): boolean;
|
||||
rename(oldPath: string, newPath: string, cb: BFSOneArgCallback): void;
|
||||
renameSync(oldPath: string, newPath: string): void;
|
||||
stat(p: string, isLstat: boolean, cb: BFSCallback<Stats>): void;
|
||||
statSync(p: string, isLstat: boolean): Stats;
|
||||
open(p: string, flag: FileFlag, mode: number, cb: BFSCallback<File>): void;
|
||||
openSync(p: string, flag: FileFlag, mode: number): File;
|
||||
unlink(p: string, cb: BFSOneArgCallback): void;
|
||||
unlinkSync(p: string): void;
|
||||
rmdir(p: string, cb: BFSOneArgCallback): void;
|
||||
rmdirSync(p: string): void;
|
||||
mkdir(p: string, mode: number, cb: BFSOneArgCallback): void;
|
||||
mkdirSync(p: string, mode: number): void;
|
||||
readdir(p: string, cb: BFSCallback<string[]>): void;
|
||||
readdirSync(p: string): string[];
|
||||
exists(p: string, cb: (exists: boolean) => void): void;
|
||||
existsSync(p: string): boolean;
|
||||
realpath(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}, cb: BFSCallback<string>): void;
|
||||
realpathSync(p: string, cache: {
|
||||
[path: string]: string;
|
||||
}): string;
|
||||
truncate(p: string, len: number, cb: BFSOneArgCallback): void;
|
||||
truncateSync(p: string, len: number): void;
|
||||
readFile(fname: string, encoding: string, flag: FileFlag, cb: BFSCallback<string | Buffer>): void;
|
||||
readFileSync(fname: string, encoding: string, flag: FileFlag): any;
|
||||
writeFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
writeFileSync(fname: string, data: any, encoding: string, flag: FileFlag, mode: number): void;
|
||||
appendFile(fname: string, data: any, encoding: string, flag: FileFlag, mode: number, cb: BFSOneArgCallback): void;
|
||||
appendFileSync(fname: string, data: any, encoding: string, flag: FileFlag, mode: number): void;
|
||||
chmod(p: string, isLchmod: boolean, mode: number, cb: BFSOneArgCallback): void;
|
||||
chmodSync(p: string, isLchmod: boolean, mode: number): void;
|
||||
chown(p: string, isLchown: boolean, uid: number, gid: number, cb: BFSOneArgCallback): void;
|
||||
chownSync(p: string, isLchown: boolean, uid: number, gid: number): void;
|
||||
utimes(p: string, atime: Date, mtime: Date, cb: BFSOneArgCallback): void;
|
||||
utimesSync(p: string, atime: Date, mtime: Date): void;
|
||||
link(srcpath: string, dstpath: string, cb: BFSOneArgCallback): void;
|
||||
linkSync(srcpath: string, dstpath: string): void;
|
||||
symlink(srcpath: string, dstpath: string, type: string, cb: BFSOneArgCallback): void;
|
||||
symlinkSync(srcpath: string, dstpath: string, type: string): void;
|
||||
readlink(p: string, cb: BFSCallback<string>): void;
|
||||
readlinkSync(p: string): string;
|
||||
}
|
||||
328
sandpack-generated/static/browserfs11/node/generic/locked_fs.js
Normal file
328
sandpack-generated/static/browserfs11/node/generic/locked_fs.js
Normal file
@@ -0,0 +1,328 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var mutex_1 = require("./mutex");
|
||||
/**
|
||||
* This class serializes access to an underlying async filesystem.
|
||||
* For example, on an OverlayFS instance with an async lower
|
||||
* directory operations like rename and rmdir may involve multiple
|
||||
* requests involving both the upper and lower filesystems -- they
|
||||
* are not executed in a single atomic step. OverlayFS uses this
|
||||
* LockedFS to avoid having to reason about the correctness of
|
||||
* multiple requests interleaving.
|
||||
*/
|
||||
var LockedFS = /** @class */ (function () {
|
||||
function LockedFS(fs) {
|
||||
this._fs = fs;
|
||||
this._mu = new mutex_1.default();
|
||||
}
|
||||
LockedFS.prototype.getName = function () {
|
||||
return 'LockedFS<' + this._fs.getName() + '>';
|
||||
};
|
||||
LockedFS.prototype.getFSUnlocked = function () {
|
||||
return this._fs;
|
||||
};
|
||||
LockedFS.prototype.diskSpace = function (p, cb) {
|
||||
// FIXME: should this lock?
|
||||
this._fs.diskSpace(p, cb);
|
||||
};
|
||||
LockedFS.prototype.isReadOnly = function () {
|
||||
return this._fs.isReadOnly();
|
||||
};
|
||||
LockedFS.prototype.supportsLinks = function () {
|
||||
return this._fs.supportsLinks();
|
||||
};
|
||||
LockedFS.prototype.supportsProps = function () {
|
||||
return this._fs.supportsProps();
|
||||
};
|
||||
LockedFS.prototype.supportsSynch = function () {
|
||||
return this._fs.supportsSynch();
|
||||
};
|
||||
LockedFS.prototype.rename = function (oldPath, newPath, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.rename(oldPath, newPath, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.renameSync = function (oldPath, newPath) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.renameSync(oldPath, newPath);
|
||||
};
|
||||
LockedFS.prototype.stat = function (p, isLstat, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.stat(p, isLstat, function (err, stat) {
|
||||
_this._mu.unlock();
|
||||
cb(err, stat);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.statSync = function (p, isLstat) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.statSync(p, isLstat);
|
||||
};
|
||||
LockedFS.prototype.open = function (p, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.open(p, flag, mode, function (err, fd) {
|
||||
_this._mu.unlock();
|
||||
cb(err, fd);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.openSync = function (p, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.openSync(p, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.unlink = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.unlink(p, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.unlinkSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.unlinkSync(p);
|
||||
};
|
||||
LockedFS.prototype.rmdir = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.rmdir(p, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.rmdirSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.rmdirSync(p);
|
||||
};
|
||||
LockedFS.prototype.mkdir = function (p, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.mkdir(p, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.mkdirSync = function (p, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.mkdirSync(p, mode);
|
||||
};
|
||||
LockedFS.prototype.readdir = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readdir(p, function (err, files) {
|
||||
_this._mu.unlock();
|
||||
cb(err, files);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readdirSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readdirSync(p);
|
||||
};
|
||||
LockedFS.prototype.exists = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.exists(p, function (exists) {
|
||||
_this._mu.unlock();
|
||||
cb(exists);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.existsSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.existsSync(p);
|
||||
};
|
||||
LockedFS.prototype.realpath = function (p, cache, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.realpath(p, cache, function (err, resolvedPath) {
|
||||
_this._mu.unlock();
|
||||
cb(err, resolvedPath);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.realpathSync = function (p, cache) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.realpathSync(p, cache);
|
||||
};
|
||||
LockedFS.prototype.truncate = function (p, len, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.truncate(p, len, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.truncateSync = function (p, len) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.truncateSync(p, len);
|
||||
};
|
||||
LockedFS.prototype.readFile = function (fname, encoding, flag, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readFile(fname, encoding, flag, function (err, data) {
|
||||
_this._mu.unlock();
|
||||
cb(err, data);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readFileSync = function (fname, encoding, flag) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readFileSync(fname, encoding, flag);
|
||||
};
|
||||
LockedFS.prototype.writeFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.writeFile(fname, data, encoding, flag, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.writeFileSync = function (fname, data, encoding, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.writeFileSync(fname, data, encoding, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.appendFile = function (fname, data, encoding, flag, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.appendFile(fname, data, encoding, flag, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.appendFileSync = function (fname, data, encoding, flag, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.appendFileSync(fname, data, encoding, flag, mode);
|
||||
};
|
||||
LockedFS.prototype.chmod = function (p, isLchmod, mode, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.chmod(p, isLchmod, mode, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.chmodSync = function (p, isLchmod, mode) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.chmodSync(p, isLchmod, mode);
|
||||
};
|
||||
LockedFS.prototype.chown = function (p, isLchown, uid, gid, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.chown(p, isLchown, uid, gid, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.chownSync = function (p, isLchown, uid, gid) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.chownSync(p, isLchown, uid, gid);
|
||||
};
|
||||
LockedFS.prototype.utimes = function (p, atime, mtime, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.utimes(p, atime, mtime, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.utimesSync = function (p, atime, mtime) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.utimesSync(p, atime, mtime);
|
||||
};
|
||||
LockedFS.prototype.link = function (srcpath, dstpath, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.link(srcpath, dstpath, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.linkSync = function (srcpath, dstpath) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.linkSync(srcpath, dstpath);
|
||||
};
|
||||
LockedFS.prototype.symlink = function (srcpath, dstpath, type, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.symlink(srcpath, dstpath, type, function (err) {
|
||||
_this._mu.unlock();
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.symlinkSync = function (srcpath, dstpath, type) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.symlinkSync(srcpath, dstpath, type);
|
||||
};
|
||||
LockedFS.prototype.readlink = function (p, cb) {
|
||||
var _this = this;
|
||||
this._mu.lock(function () {
|
||||
_this._fs.readlink(p, function (err, linkString) {
|
||||
_this._mu.unlock();
|
||||
cb(err, linkString);
|
||||
});
|
||||
});
|
||||
};
|
||||
LockedFS.prototype.readlinkSync = function (p) {
|
||||
if (this._mu.isLocked()) {
|
||||
throw new Error('invalid sync call');
|
||||
}
|
||||
return this._fs.readlinkSync(p);
|
||||
};
|
||||
return LockedFS;
|
||||
}());
|
||||
exports.default = LockedFS;
|
||||
//# sourceMappingURL=locked_fs.js.map
|
||||
12
sandpack-generated/static/browserfs11/node/generic/mutex.d.ts
vendored
Normal file
12
sandpack-generated/static/browserfs11/node/generic/mutex.d.ts
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
/**
|
||||
* Non-recursive mutex
|
||||
* @hidden
|
||||
*/
|
||||
export default class Mutex {
|
||||
private _locked;
|
||||
private _waiters;
|
||||
lock(cb: Function): void;
|
||||
unlock(): void;
|
||||
tryLock(): boolean;
|
||||
isLocked(): boolean;
|
||||
}
|
||||
51
sandpack-generated/static/browserfs11/node/generic/mutex.js
Normal file
51
sandpack-generated/static/browserfs11/node/generic/mutex.js
Normal file
@@ -0,0 +1,51 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var setImmediate_1 = require("../generic/setImmediate");
|
||||
/**
|
||||
* Non-recursive mutex
|
||||
* @hidden
|
||||
*/
|
||||
var Mutex = /** @class */ (function () {
|
||||
function Mutex() {
|
||||
this._locked = false;
|
||||
this._waiters = [];
|
||||
}
|
||||
Mutex.prototype.lock = function (cb) {
|
||||
if (this._locked) {
|
||||
this._waiters.push(cb);
|
||||
return;
|
||||
}
|
||||
this._locked = true;
|
||||
cb();
|
||||
};
|
||||
Mutex.prototype.unlock = function () {
|
||||
if (!this._locked) {
|
||||
throw new Error('unlock of a non-locked mutex');
|
||||
}
|
||||
var next = this._waiters.shift();
|
||||
// don't unlock - we want to queue up next for the
|
||||
// _end_ of the current task execution, but we don't
|
||||
// want it to be called inline with whatever the
|
||||
// current stack is. This way we still get the nice
|
||||
// behavior that an unlock immediately followed by a
|
||||
// lock won't cause starvation.
|
||||
if (next) {
|
||||
(0, setImmediate_1.default)(next);
|
||||
return;
|
||||
}
|
||||
this._locked = false;
|
||||
};
|
||||
Mutex.prototype.tryLock = function () {
|
||||
if (this._locked) {
|
||||
return false;
|
||||
}
|
||||
this._locked = true;
|
||||
return true;
|
||||
};
|
||||
Mutex.prototype.isLocked = function () {
|
||||
return this._locked;
|
||||
};
|
||||
return Mutex;
|
||||
}());
|
||||
exports.default = Mutex;
|
||||
//# sourceMappingURL=mutex.js.map
|
||||
209
sandpack-generated/static/browserfs11/node/generic/preload_file.d.ts
vendored
Normal file
209
sandpack-generated/static/browserfs11/node/generic/preload_file.d.ts
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
/// <reference types="node" />
|
||||
import { BaseFile, File } from '../core/file';
|
||||
import { FileSystem, BFSOneArgCallback, BFSCallback, BFSThreeArgCallback } from '../core/file_system';
|
||||
import Stats from '../core/node_fs_stats';
|
||||
import { FileFlag } from '../core/file_flag';
|
||||
/**
|
||||
* An implementation of the File interface that operates on a file that is
|
||||
* completely in-memory. PreloadFiles are backed by a Buffer.
|
||||
*
|
||||
* This is also an abstract class, as it lacks an implementation of 'sync' and
|
||||
* 'close'. Each filesystem that wishes to use this file representation must
|
||||
* extend this class and implement those two methods.
|
||||
* @todo 'close' lever that disables functionality once closed.
|
||||
*/
|
||||
export default class PreloadFile<T extends FileSystem> extends BaseFile {
|
||||
protected _fs: T;
|
||||
private _pos;
|
||||
private _path;
|
||||
private _stat;
|
||||
private _flag;
|
||||
private _buffer;
|
||||
private _dirty;
|
||||
/**
|
||||
* Creates a file with the given path and, optionally, the given contents. Note
|
||||
* that, if contents is specified, it will be mutated by the file!
|
||||
* @param _fs The file system that created the file.
|
||||
* @param _path
|
||||
* @param _mode The mode that the file was opened using.
|
||||
* Dictates permissions and where the file pointer starts.
|
||||
* @param _stat The stats object for the given file.
|
||||
* PreloadFile will mutate this object. Note that this object must contain
|
||||
* the appropriate mode that the file was opened as.
|
||||
* @param contents A buffer containing the entire
|
||||
* contents of the file. PreloadFile will mutate this buffer. If not
|
||||
* specified, we assume it is a new file.
|
||||
*/
|
||||
constructor(_fs: T, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
/**
|
||||
* NONSTANDARD: Get the underlying buffer for this file. !!DO NOT MUTATE!! Will mess up dirty tracking.
|
||||
*/
|
||||
getBuffer(): Buffer;
|
||||
/**
|
||||
* NONSTANDARD: Get underlying stats for this file. !!DO NOT MUTATE!!
|
||||
*/
|
||||
getStats(): Stats;
|
||||
getFlag(): FileFlag;
|
||||
/**
|
||||
* Get the path to this file.
|
||||
* @return [String] The path to the file.
|
||||
*/
|
||||
getPath(): string;
|
||||
/**
|
||||
* Get the current file position.
|
||||
*
|
||||
* We emulate the following bug mentioned in the Node documentation:
|
||||
* > On Linux, positional writes don't work when the file is opened in append
|
||||
* mode. The kernel ignores the position argument and always appends the data
|
||||
* to the end of the file.
|
||||
* @return [Number] The current file position.
|
||||
*/
|
||||
getPos(): number;
|
||||
/**
|
||||
* Advance the current file position by the indicated number of positions.
|
||||
* @param [Number] delta
|
||||
*/
|
||||
advancePos(delta: number): number;
|
||||
/**
|
||||
* Set the file position.
|
||||
* @param [Number] newPos
|
||||
*/
|
||||
setPos(newPos: number): number;
|
||||
/**
|
||||
* **Core**: Asynchronous sync. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous sync.
|
||||
*/
|
||||
syncSync(): void;
|
||||
/**
|
||||
* **Core**: Asynchronous close. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* **Core**: Synchronous close.
|
||||
*/
|
||||
closeSync(): void;
|
||||
/**
|
||||
* Asynchronous `stat`.
|
||||
* @param [Function(BrowserFS.ApiError, BrowserFS.node.fs.Stats)] cb
|
||||
*/
|
||||
stat(cb: BFSCallback<Stats>): void;
|
||||
/**
|
||||
* Synchronous `stat`.
|
||||
*/
|
||||
statSync(): Stats;
|
||||
/**
|
||||
* Asynchronous truncate.
|
||||
* @param [Number] len
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
truncate(len: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous truncate.
|
||||
* @param [Number] len
|
||||
*/
|
||||
truncateSync(len: number): void;
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)]
|
||||
* cb The number specifies the number of bytes written into the file.
|
||||
*/
|
||||
write(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.writeSync multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
* @param [Number] length The amount of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @return [Number]
|
||||
*/
|
||||
writeSync(buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)] cb The
|
||||
* number is the number of bytes read
|
||||
*/
|
||||
read(buffer: Buffer, offset: number, length: number, position: number, cb: BFSThreeArgCallback<number, Buffer>): void;
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @return [Number]
|
||||
*/
|
||||
readSync(buffer: Buffer, offset: number, length: number, position: number): number;
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number|String] mode
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
chmod(mode: number, cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
     * Synchronous `fchmod`.
|
||||
* @param [Number] mode
|
||||
*/
|
||||
chmodSync(mode: number): void;
|
||||
protected isDirty(): boolean;
|
||||
/**
|
||||
* Resets the dirty bit. Should only be called after a sync has completed successfully.
|
||||
*/
|
||||
protected resetDirty(): void;
|
||||
}
|
||||
/**
|
||||
* File class for the InMemory and XHR file systems.
|
||||
* Doesn't sync to anything, so it works nicely for memory-only files.
|
||||
*/
|
||||
export declare class NoSyncFile<T extends FileSystem> extends PreloadFile<T> implements File {
|
||||
constructor(_fs: T, _path: string, _flag: FileFlag, _stat: Stats, contents?: Buffer);
|
||||
/**
|
||||
     * Asynchronous sync. Doesn't do anything; it simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
sync(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous sync. Doesn't do anything.
|
||||
*/
|
||||
syncSync(): void;
|
||||
/**
|
||||
     * Asynchronous close. Doesn't do anything; it simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
close(cb: BFSOneArgCallback): void;
|
||||
/**
|
||||
* Synchronous close. Doesn't do anything.
|
||||
*/
|
||||
closeSync(): void;
|
||||
}
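For orientation, a minimal usage sketch of the File interface declared above. The `file` handle is an assumption (for example a NoSyncFile handed out by an in-memory backend); opening files is outside this module, and the contents are illustrative.

```javascript
// `file` is an assumed, already-open handle implementing the interface above.
var payload = Buffer.from('hello preload');
file.write(payload, 0, payload.length, 0, function (err, bytesWritten) {
  if (err) { return console.error(err); }
  var out = Buffer.alloc(bytesWritten);
  // An explicit position is passed here; null would read from the current position instead.
  file.read(out, 0, bytesWritten, 0, function (err2, bytesRead) {
    if (err2) { return console.error(err2); }
    console.log(out.toString('utf8', 0, bytesRead)); // -> 'hello preload'
    file.close(function (err3) { if (err3) { console.error(err3); } });
  });
});
```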
|
||||
@@ -0,0 +1,408 @@
|
||||
"use strict";
|
||||
var __extends = (this && this.__extends) || (function () {
|
||||
var extendStatics = function (d, b) {
|
||||
extendStatics = Object.setPrototypeOf ||
|
||||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
|
||||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
|
||||
return extendStatics(d, b);
|
||||
};
|
||||
return function (d, b) {
|
||||
if (typeof b !== "function" && b !== null)
|
||||
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
|
||||
extendStatics(d, b);
|
||||
function __() { this.constructor = d; }
|
||||
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.NoSyncFile = void 0;
|
||||
var file_1 = require("../core/file");
|
||||
var node_fs_stats_1 = require("../core/node_fs_stats");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
var node_fs_1 = require("../core/node_fs");
|
||||
var util_1 = require("../core/util");
|
||||
/**
|
||||
* An implementation of the File interface that operates on a file that is
|
||||
* completely in-memory. PreloadFiles are backed by a Buffer.
|
||||
*
|
||||
* This is also an abstract class, as it lacks an implementation of 'sync' and
|
||||
* 'close'. Each filesystem that wishes to use this file representation must
|
||||
* extend this class and implement those two methods.
|
||||
 * @todo Add a 'closed' flag that disables functionality once the file is closed.
|
||||
*/
|
||||
var PreloadFile = /** @class */ (function (_super) {
|
||||
__extends(PreloadFile, _super);
|
||||
/**
|
||||
* Creates a file with the given path and, optionally, the given contents. Note
|
||||
* that, if contents is specified, it will be mutated by the file!
|
||||
* @param _fs The file system that created the file.
|
||||
* @param _path
|
||||
     * @param _flag The flag that the file was opened with.
|
||||
* Dictates permissions and where the file pointer starts.
|
||||
* @param _stat The stats object for the given file.
|
||||
* PreloadFile will mutate this object. Note that this object must contain
|
||||
* the appropriate mode that the file was opened as.
|
||||
* @param contents A buffer containing the entire
|
||||
* contents of the file. PreloadFile will mutate this buffer. If not
|
||||
* specified, we assume it is a new file.
|
||||
*/
|
||||
function PreloadFile(_fs, _path, _flag, _stat, contents) {
|
||||
var _this = _super.call(this) || this;
|
||||
_this._pos = 0;
|
||||
_this._dirty = false;
|
||||
_this._fs = _fs;
|
||||
_this._path = _path;
|
||||
_this._flag = _flag;
|
||||
_this._stat = _stat;
|
||||
_this._buffer = contents ? contents : (0, util_1.emptyBuffer)();
|
||||
// Note: This invariant is *not* maintained once the file starts getting
|
||||
// modified.
|
||||
// Note: Only actually matters if file is readable, as writeable modes may
|
||||
// truncate/append to file.
|
||||
if (_this._stat.size !== _this._buffer.length && _this._flag.isReadable()) {
|
||||
throw new Error("Invalid buffer: Buffer is ".concat(_this._buffer.length, " long, yet Stats object specifies that file is ").concat(_this._stat.size, " long."));
|
||||
}
|
||||
return _this;
|
||||
}
|
||||
/**
|
||||
* NONSTANDARD: Get the underlying buffer for this file. !!DO NOT MUTATE!! Will mess up dirty tracking.
|
||||
*/
|
||||
PreloadFile.prototype.getBuffer = function () {
|
||||
return this._buffer;
|
||||
};
|
||||
/**
|
||||
* NONSTANDARD: Get underlying stats for this file. !!DO NOT MUTATE!!
|
||||
*/
|
||||
PreloadFile.prototype.getStats = function () {
|
||||
return this._stat;
|
||||
};
|
||||
PreloadFile.prototype.getFlag = function () {
|
||||
return this._flag;
|
||||
};
|
||||
/**
|
||||
* Get the path to this file.
|
||||
* @return [String] The path to the file.
|
||||
*/
|
||||
PreloadFile.prototype.getPath = function () {
|
||||
return this._path;
|
||||
};
|
||||
/**
|
||||
* Get the current file position.
|
||||
*
|
||||
* We emulate the following bug mentioned in the Node documentation:
|
||||
* > On Linux, positional writes don't work when the file is opened in append
|
||||
* mode. The kernel ignores the position argument and always appends the data
|
||||
* to the end of the file.
|
||||
* @return [Number] The current file position.
|
||||
*/
|
||||
PreloadFile.prototype.getPos = function () {
|
||||
if (this._flag.isAppendable()) {
|
||||
return this._stat.size;
|
||||
}
|
||||
return this._pos;
|
||||
};
|
||||
/**
|
||||
* Advance the current file position by the indicated number of positions.
|
||||
* @param [Number] delta
|
||||
*/
|
||||
PreloadFile.prototype.advancePos = function (delta) {
|
||||
return this._pos += delta;
|
||||
};
|
||||
/**
|
||||
* Set the file position.
|
||||
* @param [Number] newPos
|
||||
*/
|
||||
PreloadFile.prototype.setPos = function (newPos) {
|
||||
return this._pos = newPos;
|
||||
};
|
||||
/**
|
||||
* **Core**: Asynchronous sync. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.sync = function (cb) {
|
||||
try {
|
||||
this.syncSync();
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* **Core**: Synchronous sync.
|
||||
*/
|
||||
PreloadFile.prototype.syncSync = function () {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* **Core**: Asynchronous close. Must be implemented by subclasses of this
|
||||
* class.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.close = function (cb) {
|
||||
try {
|
||||
this.closeSync();
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* **Core**: Synchronous close.
|
||||
*/
|
||||
PreloadFile.prototype.closeSync = function () {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
};
|
||||
/**
|
||||
* Asynchronous `stat`.
|
||||
* @param [Function(BrowserFS.ApiError, BrowserFS.node.fs.Stats)] cb
|
||||
*/
|
||||
PreloadFile.prototype.stat = function (cb) {
|
||||
try {
|
||||
cb(null, node_fs_stats_1.default.clone(this._stat));
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Synchronous `stat`.
|
||||
*/
|
||||
PreloadFile.prototype.statSync = function () {
|
||||
return node_fs_stats_1.default.clone(this._stat);
|
||||
};
|
||||
/**
|
||||
* Asynchronous truncate.
|
||||
* @param [Number] len
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.truncate = function (len, cb) {
|
||||
try {
|
||||
this.truncateSync(len);
|
||||
if (this._flag.isSynchronous() && !node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
                return this.sync(cb); // return so cb is not invoked a second time below
|
||||
}
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
return cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Synchronous truncate.
|
||||
* @param [Number] len
|
||||
*/
|
||||
PreloadFile.prototype.truncateSync = function (len) {
|
||||
this._dirty = true;
|
||||
if (!this._flag.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a writeable mode.');
|
||||
}
|
||||
this._stat.mtimeMs = Date.now();
|
||||
if (len > this._buffer.length) {
|
||||
var buf = Buffer.alloc(len - this._buffer.length, 0);
|
||||
// Write will set @_stat.size for us.
|
||||
this.writeSync(buf, 0, buf.length, this._buffer.length);
|
||||
if (this._flag.isSynchronous() && node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
this.syncSync();
|
||||
}
|
||||
return;
|
||||
}
|
||||
this._stat.size = len;
|
||||
// Truncate buffer to 'len'.
|
||||
var newBuff = Buffer.alloc(len);
|
||||
this._buffer.copy(newBuff, 0, 0, len);
|
||||
this._buffer = newBuff;
|
||||
if (this._flag.isSynchronous() && node_fs_1.default.getRootFS().supportsSynch()) {
|
||||
this.syncSync();
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
* Note that it is unsafe to use fs.write multiple times on the same file
|
||||
* without waiting for the callback.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
     * @param [Number] length The number of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)]
|
||||
     * cb The number argument is the number of bytes written into the file.
|
||||
*/
|
||||
PreloadFile.prototype.write = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.writeSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Write buffer to the file.
|
||||
     * Note that it is unsafe to mix fs.writeSync with asynchronous writes on the
|
||||
     * same file without waiting for those writes to complete.
|
||||
* @param [BrowserFS.node.Buffer] buffer Buffer containing the data to write to
|
||||
* the file.
|
||||
* @param [Number] offset Offset in the buffer to start reading data from.
|
||||
     * @param [Number] length The number of bytes to write to the file.
|
||||
* @param [Number] position Offset from the beginning of the file where this
|
||||
* data should be written. If position is null, the data will be written at
|
||||
* the current position.
|
||||
* @return [Number]
|
||||
*/
|
||||
PreloadFile.prototype.writeSync = function (buffer, offset, length, position) {
|
||||
this._dirty = true;
|
||||
if (position === undefined || position === null) {
|
||||
position = this.getPos();
|
||||
}
|
||||
if (!this._flag.isWriteable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a writeable mode.');
|
||||
}
|
||||
var endFp = position + length;
|
||||
if (endFp > this._stat.size) {
|
||||
this._stat.size = endFp;
|
||||
if (endFp > this._buffer.length) {
|
||||
// Extend the buffer!
|
||||
var newBuff = Buffer.alloc(endFp);
|
||||
this._buffer.copy(newBuff);
|
||||
this._buffer = newBuff;
|
||||
}
|
||||
}
|
||||
var len = buffer.copy(this._buffer, position, offset, offset + length);
|
||||
this._stat.mtimeMs = Date.now();
|
||||
if (this._flag.isSynchronous()) {
|
||||
this.syncSync();
|
||||
return len;
|
||||
}
|
||||
this.setPos(position + len);
|
||||
return len;
|
||||
};
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @param [Function(BrowserFS.ApiError, Number, BrowserFS.node.Buffer)] cb The
|
||||
     * number is the number of bytes read.
|
||||
*/
|
||||
PreloadFile.prototype.read = function (buffer, offset, length, position, cb) {
|
||||
try {
|
||||
cb(null, this.readSync(buffer, offset, length, position), buffer);
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Read data from the file.
|
||||
* @param [BrowserFS.node.Buffer] buffer The buffer that the data will be
|
||||
* written to.
|
||||
* @param [Number] offset The offset within the buffer where writing will
|
||||
* start.
|
||||
* @param [Number] length An integer specifying the number of bytes to read.
|
||||
* @param [Number] position An integer specifying where to begin reading from
|
||||
* in the file. If position is null, data will be read from the current file
|
||||
* position.
|
||||
* @return [Number]
|
||||
*/
|
||||
PreloadFile.prototype.readSync = function (buffer, offset, length, position) {
|
||||
if (!this._flag.isReadable()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EPERM, 'File not opened with a readable mode.');
|
||||
}
|
||||
if (position === undefined || position === null) {
|
||||
position = this.getPos();
|
||||
}
|
||||
var endRead = position + length;
|
||||
if (endRead > this._stat.size) {
|
||||
length = this._stat.size - position;
|
||||
}
|
||||
var rv = this._buffer.copy(buffer, offset, position, position + length);
|
||||
this._stat.atimeMs = Date.now();
|
||||
this._pos = position + length;
|
||||
return rv;
|
||||
};
|
||||
/**
|
||||
* Asynchronous `fchmod`.
|
||||
* @param [Number|String] mode
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
PreloadFile.prototype.chmod = function (mode, cb) {
|
||||
try {
|
||||
this.chmodSync(mode);
|
||||
cb();
|
||||
}
|
||||
catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
};
|
||||
/**
|
||||
     * Synchronous `fchmod`.
|
||||
* @param [Number] mode
|
||||
*/
|
||||
PreloadFile.prototype.chmodSync = function (mode) {
|
||||
if (!this._fs.supportsProps()) {
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.ENOTSUP);
|
||||
}
|
||||
this._dirty = true;
|
||||
this._stat.chmod(mode);
|
||||
this.syncSync();
|
||||
};
|
||||
PreloadFile.prototype.isDirty = function () {
|
||||
return this._dirty;
|
||||
};
|
||||
/**
|
||||
* Resets the dirty bit. Should only be called after a sync has completed successfully.
|
||||
*/
|
||||
PreloadFile.prototype.resetDirty = function () {
|
||||
this._dirty = false;
|
||||
};
|
||||
return PreloadFile;
|
||||
}(file_1.BaseFile));
|
||||
exports.default = PreloadFile;
|
||||
/**
|
||||
* File class for the InMemory and XHR file systems.
|
||||
* Doesn't sync to anything, so it works nicely for memory-only files.
|
||||
*/
|
||||
var NoSyncFile = /** @class */ (function (_super) {
|
||||
__extends(NoSyncFile, _super);
|
||||
function NoSyncFile(_fs, _path, _flag, _stat, contents) {
|
||||
return _super.call(this, _fs, _path, _flag, _stat, contents) || this;
|
||||
}
|
||||
/**
|
||||
     * Asynchronous sync. Doesn't do anything; it simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
NoSyncFile.prototype.sync = function (cb) {
|
||||
cb();
|
||||
};
|
||||
/**
|
||||
* Synchronous sync. Doesn't do anything.
|
||||
*/
|
||||
NoSyncFile.prototype.syncSync = function () {
|
||||
// NOP.
|
||||
};
|
||||
/**
|
||||
     * Asynchronous close. Doesn't do anything; it simply calls the cb.
|
||||
* @param [Function(BrowserFS.ApiError)] cb
|
||||
*/
|
||||
NoSyncFile.prototype.close = function (cb) {
|
||||
cb();
|
||||
};
|
||||
/**
|
||||
* Synchronous close. Doesn't do anything.
|
||||
*/
|
||||
NoSyncFile.prototype.closeSync = function () {
|
||||
// NOP.
|
||||
};
|
||||
return NoSyncFile;
|
||||
}(PreloadFile));
|
||||
exports.NoSyncFile = NoSyncFile;
|
||||
//# sourceMappingURL=preload_file.js.map
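The PreloadFile class doc above states that every file system wanting this in-memory representation must extend it and supply `sync`/`close` (or their synchronous variants). A hedged sketch of that contract in the same ES5 style as the emitted code; `MyFile` and `flushToBackingStore` are illustrative names, not part of this module, and the require path is assumed from this build's layout.

```javascript
var PreloadFile = require('./preload_file').default;

// Stand-in for however a concrete backend would persist the buffer.
function flushToBackingStore(path, buffer, stats) { /* persist somewhere */ }

function MyFile(fs, path, flag, stat, contents) {
  PreloadFile.call(this, fs, path, flag, stat, contents);
}
MyFile.prototype = Object.create(PreloadFile.prototype);
MyFile.prototype.constructor = MyFile;

MyFile.prototype.syncSync = function () {
  if (this.isDirty()) {
    flushToBackingStore(this.getPath(), this.getBuffer(), this.getStats());
    this.resetDirty(); // only reset after the flush succeeded
  }
};
MyFile.prototype.closeSync = function () {
  this.syncSync();
};
```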
|
||||
5
sandpack-generated/static/browserfs11/node/generic/setImmediate.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
declare let bfsSetImmediate: (cb: Function, ...args: any[]) => any;
|
||||
export default bfsSetImmediate;
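A usage sketch for the declaration above. Extra arguments are forwarded to the callback, mirroring Node's setImmediate; the relative path is assumed from this build's layout.

```javascript
var bfsSetImmediate = require('./setImmediate').default;

bfsSetImmediate(function (a, b) {
  console.log(a + b); // logs 5 on a later turn of the event loop
}, 2, 3);
```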
|
||||
@@ -0,0 +1,96 @@
|
||||
"use strict";
|
||||
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
|
||||
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
|
||||
if (ar || !(i in from)) {
|
||||
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
|
||||
ar[i] = from[i];
|
||||
}
|
||||
}
|
||||
return to.concat(ar || Array.prototype.slice.call(from));
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
var global_1 = require("../core/global");
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
var bfsSetImmediate;
|
||||
if (typeof (setImmediate) !== "undefined") {
|
||||
bfsSetImmediate = setImmediate;
|
||||
}
|
||||
else {
|
||||
var gScope_1 = global_1.default;
|
||||
var timeouts_1 = [];
|
||||
var messageName_1 = "zero-timeout-message";
|
||||
var canUsePostMessage = function () {
|
||||
if (typeof gScope_1.importScripts !== 'undefined' || !gScope_1.postMessage) {
|
||||
return false;
|
||||
}
|
||||
var postMessageIsAsync = true;
|
||||
var oldOnMessage = gScope_1.onmessage;
|
||||
gScope_1.onmessage = function () {
|
||||
postMessageIsAsync = false;
|
||||
};
|
||||
gScope_1.postMessage('', '*');
|
||||
gScope_1.onmessage = oldOnMessage;
|
||||
return postMessageIsAsync;
|
||||
};
|
||||
if (canUsePostMessage()) {
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
timeouts_1.push({ fn: fn, args: args });
|
||||
gScope_1.postMessage(messageName_1, "*");
|
||||
};
|
||||
var handleMessage = function (event) {
|
||||
if (event.source === self && event.data === messageName_1) {
|
||||
if (event.stopPropagation) {
|
||||
event.stopPropagation();
|
||||
}
|
||||
else {
|
||||
event.cancelBubble = true;
|
||||
}
|
||||
if (timeouts_1.length > 0) {
|
||||
var _a = timeouts_1.shift(), fn = _a.fn, args = _a.args;
|
||||
return fn.apply(void 0, args);
|
||||
}
|
||||
}
|
||||
};
|
||||
if (gScope_1.addEventListener) {
|
||||
gScope_1.addEventListener('message', handleMessage, true);
|
||||
}
|
||||
else {
|
||||
gScope_1.attachEvent('onmessage', handleMessage);
|
||||
}
|
||||
}
|
||||
else if (gScope_1.MessageChannel) {
|
||||
// WebWorker MessageChannel
|
||||
var channel_1 = new gScope_1.MessageChannel();
|
||||
channel_1.port1.onmessage = function (event) {
|
||||
if (timeouts_1.length > 0) {
|
||||
var _a = timeouts_1.shift(), fn = _a.fn, args = _a.args;
|
||||
return fn.apply(void 0, args);
|
||||
}
|
||||
};
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
timeouts_1.push({ fn: fn, args: args });
|
||||
channel_1.port2.postMessage('');
|
||||
};
|
||||
}
|
||||
else {
|
||||
bfsSetImmediate = function (fn) {
|
||||
var args = [];
|
||||
for (var _i = 1; _i < arguments.length; _i++) {
|
||||
args[_i - 1] = arguments[_i];
|
||||
}
|
||||
return setTimeout.apply(void 0, __spreadArray([fn, 0], args, false));
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.default = bfsSetImmediate;
|
||||
//# sourceMappingURL=setImmediate.js.map
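The postMessage branch above is the classic zero-timeout trick: queue the callback, post a sentinel message to the same window, and run the callback from the message handler, sidestepping the clamping browsers apply to setTimeout(fn, 0). A stand-alone sketch of the idea for a window context; the names are illustrative and nothing here is exported by this module.

```javascript
var queue = [];
window.addEventListener('message', function (ev) {
  if (ev.source === window && ev.data === 'zero-timeout') {
    ev.stopPropagation();
    var fn = queue.shift();
    if (fn) { fn(); }
  }
}, true);

function zeroTimeout(fn) {
  queue.push(fn);
  window.postMessage('zero-timeout', '*');
}

zeroTimeout(function () { console.log('scheduled without a setTimeout clamp'); });
```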
|
||||
42
sandpack-generated/static/browserfs11/node/generic/xhr.d.ts
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* Contains utility methods for performing a variety of tasks with
|
||||
* XmlHttpRequest across browsers.
|
||||
*/
|
||||
/// <reference types="node" />
|
||||
import { ApiError } from '../core/api_error';
|
||||
import { BFSCallback } from '../core/file_system';
|
||||
export declare const xhrIsAvailable: boolean;
|
||||
/**
|
||||
* Asynchronously download a file as a buffer or a JSON object.
|
||||
* Note that the third function signature with a non-specialized type is
|
||||
* invalid, but TypeScript requires it when you specialize string arguments to
|
||||
* constants.
|
||||
* @hidden
|
||||
*/
|
||||
export declare let asyncDownloadFile: {
|
||||
(p: string, type: 'buffer', cb: BFSCallback<Buffer>): void;
|
||||
(p: string, type: 'json', cb: BFSCallback<any>): void;
|
||||
(p: string, type: string, cb: BFSCallback<any>): void;
|
||||
};
|
||||
/**
|
||||
* Synchronously download a file as a buffer or a JSON object.
|
||||
* Note that the third function signature with a non-specialized type is
|
||||
* invalid, but TypeScript requires it when you specialize string arguments to
|
||||
* constants.
|
||||
* @hidden
|
||||
*/
|
||||
export declare let syncDownloadFile: {
|
||||
(p: string, type: 'buffer'): Buffer;
|
||||
(p: string, type: 'json'): any;
|
||||
(p: string, type: string): any;
|
||||
};
|
||||
/**
|
||||
* Synchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function getFileSizeSync(p: string): number;
|
||||
/**
|
||||
* Asynchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
export declare function getFileSizeAsync(p: string, cb: (err: ApiError, size?: number) => void): void;
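A usage sketch for the declarations above; the URL is illustrative and the relative path is assumed from this build's layout.

```javascript
var xhr = require('./xhr');

xhr.getFileSizeAsync('/static/manifest.json', function (err, size) {
  if (err) { return console.error(err); }
  console.log('Content-Length:', size, 'bytes');
});

xhr.asyncDownloadFile('/static/manifest.json', 'json', function (err, manifest) {
  if (err) { return console.error(err); }
  console.log(manifest);
});
```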
|
||||
199
sandpack-generated/static/browserfs11/node/generic/xhr.js
Normal file
@@ -0,0 +1,199 @@
|
||||
"use strict";
|
||||
/**
|
||||
* Contains utility methods for performing a variety of tasks with
|
||||
* XmlHttpRequest across browsers.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getFileSizeAsync = exports.getFileSizeSync = exports.syncDownloadFile = exports.asyncDownloadFile = exports.xhrIsAvailable = void 0;
|
||||
var util_1 = require("../core/util");
|
||||
var api_error_1 = require("../core/api_error");
|
||||
exports.xhrIsAvailable = (typeof (XMLHttpRequest) !== "undefined" && XMLHttpRequest !== null);
|
||||
function asyncDownloadFileModern(p, type, cb) {
|
||||
var req = new XMLHttpRequest();
|
||||
req.open('GET', p, true);
|
||||
var jsonSupported = true;
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
req.responseType = 'arraybuffer';
|
||||
break;
|
||||
case 'json':
|
||||
// Some browsers don't support the JSON response type.
|
||||
// They either reset responseType, or throw an exception.
|
||||
// @see https://github.com/Modernizr/Modernizr/blob/master/src/testXhrType.js
|
||||
try {
|
||||
req.responseType = 'json';
|
||||
jsonSupported = req.responseType === 'json';
|
||||
}
|
||||
catch (e) {
|
||||
jsonSupported = false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type));
|
||||
}
|
||||
req.onreadystatechange = function (e) {
|
||||
if (req.readyState === 4) {
|
||||
if (req.status === 200) {
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
// XXX: WebKit-based browsers return *null* when XHRing an empty file.
|
||||
return cb(null, req.response ? Buffer.from(req.response) : (0, util_1.emptyBuffer)());
|
||||
case 'json':
|
||||
if (jsonSupported) {
|
||||
return cb(null, req.response);
|
||||
}
|
||||
else {
|
||||
return cb(null, JSON.parse(req.responseText));
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status)));
|
||||
}
|
||||
}
|
||||
};
|
||||
req.send();
|
||||
}
|
||||
function syncDownloadFileModern(p, type) {
|
||||
var req = new XMLHttpRequest();
|
||||
req.open('GET', p, false);
|
||||
// On most platforms, we cannot set the responseType of synchronous downloads.
|
||||
// @todo Test for this; IE10 allows this, as do older versions of Chrome/FF.
|
||||
var data = null;
|
||||
var err = null;
|
||||
// Classic hack to download binary data as a string.
|
||||
req.overrideMimeType('text/plain; charset=x-user-defined');
|
||||
req.onreadystatechange = function (e) {
|
||||
if (req.readyState === 4) {
|
||||
if (req.status === 200) {
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
// Convert the text into a buffer.
|
||||
var text = req.responseText;
|
||||
data = Buffer.alloc(text.length);
|
||||
// Throw away the upper bits of each character.
|
||||
for (var i = 0; i < text.length; i++) {
|
||||
// This will automatically throw away the upper bit of each
|
||||
// character for us.
|
||||
data[i] = text.charCodeAt(i);
|
||||
}
|
||||
return;
|
||||
case 'json':
|
||||
data = JSON.parse(req.responseText);
|
||||
return;
|
||||
}
|
||||
}
|
||||
else {
|
||||
err = new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status));
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
req.send();
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
return data;
|
||||
}
|
||||
function syncDownloadFileIE10(p, type) {
|
||||
var req = new XMLHttpRequest();
|
||||
req.open('GET', p, false);
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
req.responseType = 'arraybuffer';
|
||||
break;
|
||||
case 'json':
|
||||
// IE10 does not support the JSON type.
|
||||
break;
|
||||
default:
|
||||
throw new api_error_1.ApiError(api_error_1.ErrorCode.EINVAL, "Invalid download type: " + type);
|
||||
}
|
||||
var data;
|
||||
var err;
|
||||
req.onreadystatechange = function (e) {
|
||||
if (req.readyState === 4) {
|
||||
if (req.status === 200) {
|
||||
switch (type) {
|
||||
case 'buffer':
|
||||
data = Buffer.from(req.response);
|
||||
break;
|
||||
case 'json':
|
||||
data = JSON.parse(req.response);
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
err = new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR error: response returned code ".concat(req.status));
|
||||
}
|
||||
}
|
||||
};
|
||||
req.send();
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
return data;
|
||||
}
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
function getFileSize(async, p, cb) {
|
||||
var req = new XMLHttpRequest();
|
||||
req.open('HEAD', p, async);
|
||||
req.onreadystatechange = function (e) {
|
||||
if (req.readyState === 4) {
|
||||
if (req.status === 200) {
|
||||
try {
|
||||
return cb(null, parseInt(req.getResponseHeader('Content-Length') || '-1', 10));
|
||||
}
|
||||
catch (e) {
|
||||
// In the event that the header isn't present or there is an error...
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR HEAD error: Could not read content-length."));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return cb(new api_error_1.ApiError(api_error_1.ErrorCode.EIO, "XHR HEAD error: response returned code ".concat(req.status)));
|
||||
}
|
||||
}
|
||||
};
|
||||
req.send();
|
||||
}
|
||||
/**
|
||||
* Asynchronously download a file as a buffer or a JSON object.
|
||||
* Note that the third function signature with a non-specialized type is
|
||||
* invalid, but TypeScript requires it when you specialize string arguments to
|
||||
* constants.
|
||||
* @hidden
|
||||
*/
|
||||
exports.asyncDownloadFile = asyncDownloadFileModern;
|
||||
/**
|
||||
* Synchronously download a file as a buffer or a JSON object.
|
||||
* Note that the third function signature with a non-specialized type is
|
||||
* invalid, but TypeScript requires it when you specialize string arguments to
|
||||
* constants.
|
||||
* @hidden
|
||||
*/
|
||||
exports.syncDownloadFile = (util_1.isIE && typeof Blob !== 'undefined') ? syncDownloadFileIE10 : syncDownloadFileModern;
|
||||
/**
|
||||
* Synchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
function getFileSizeSync(p) {
|
||||
var rv = -1;
|
||||
getFileSize(false, p, function (err, size) {
|
||||
if (err) {
|
||||
throw err;
|
||||
}
|
||||
rv = size;
|
||||
});
|
||||
return rv;
|
||||
}
|
||||
exports.getFileSizeSync = getFileSizeSync;
|
||||
/**
|
||||
* Asynchronously retrieves the size of the given file in bytes.
|
||||
* @hidden
|
||||
*/
|
||||
function getFileSizeAsync(p, cb) {
|
||||
getFileSize(true, p, cb);
|
||||
}
|
||||
exports.getFileSizeAsync = getFileSizeAsync;
|
||||
//# sourceMappingURL=xhr.js.map
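The synchronous variants block the calling thread until the request completes, which browsers discourage on the main thread, so they are mostly useful inside Web Workers. A small sketch with an illustrative URL and an assumed relative path:

```javascript
var xhr = require('./xhr');

var buf = xhr.syncDownloadFile('/static/blob.bin', 'buffer'); // returns a Buffer
console.log(buf.length, 'bytes downloaded;',
            xhr.getFileSizeSync('/static/blob.bin'), 'bytes reported by HEAD');
```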
|
||||
5
sandpack-generated/static/browserfs11/node/index.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
|
||||
* BrowserFS's main entry point.
|
||||
* It installs all of the needed polyfills, and requires() the main module.
|
||||
*/
|
||||
export * from './core/browserfs';
|
||||
63
sandpack-generated/static/browserfs11/node/index.js
Normal file
@@ -0,0 +1,63 @@
|
||||
"use strict";
|
||||
/**
|
||||
* BrowserFS's main entry point.
|
||||
* It installs all of the needed polyfills, and requires() the main module.
|
||||
*/
|
||||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
var desc = Object.getOwnPropertyDescriptor(m, k);
|
||||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
||||
desc = { enumerable: true, get: function() { return m[k]; } };
|
||||
}
|
||||
Object.defineProperty(o, k2, desc);
|
||||
}) : (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
o[k2] = m[k];
|
||||
}));
|
||||
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
||||
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
// IE substr does not support negative indices
|
||||
if ('ab'.substr(-1) !== 'b') {
|
||||
String.prototype.substr = function (substr) {
|
||||
return function (start, length) {
|
||||
            // If start is negative, convert it to an index measured from the
|
||||
            // beginning of the string.
|
||||
if (start < 0) {
|
||||
start = this.length + start;
|
||||
}
|
||||
// call the original function
|
||||
return substr.call(this, start, length);
|
||||
};
|
||||
}(String.prototype.substr);
|
||||
}
|
||||
// Polyfill for Uint8Array.prototype.slice.
|
||||
// Safari and some other browsers do not define it.
|
||||
if (typeof (ArrayBuffer) !== 'undefined' && typeof (Uint8Array) !== 'undefined') {
|
||||
if (!Uint8Array.prototype['slice']) {
|
||||
Uint8Array.prototype.slice = function (start, end) {
|
||||
if (start === void 0) { start = 0; }
|
||||
if (end === void 0) { end = this.length; }
|
||||
var self = this;
|
||||
if (start < 0) {
|
||||
start = this.length + start;
|
||||
if (start < 0) {
|
||||
start = 0;
|
||||
}
|
||||
}
|
||||
if (end < 0) {
|
||||
end = this.length + end;
|
||||
if (end < 0) {
|
||||
end = 0;
|
||||
}
|
||||
}
|
||||
if (end < start) {
|
||||
end = start;
|
||||
}
|
||||
return new Uint8Array(self.buffer, self.byteOffset + start, end - start);
|
||||
};
|
||||
}
|
||||
}
|
||||
__exportStar(require("./core/browserfs"), exports);
|
||||
//# sourceMappingURL=index.js.map
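A quick check of what the two polyfills above normalize; note that the polyfilled Uint8Array slice returns a view over the same underlying buffer, whereas the native slice copies.

```javascript
console.log('ab'.substr(-1)); // 'b' (old IE treated the negative index as 0)

var u8 = new Uint8Array([1, 2, 3, 4]);
console.log(u8.slice(-2)); // Uint8Array [3, 4], also where slice was missing natively
```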
|
||||
46
sandpack-generated/static/browserfs11/node/typedoc.d.ts
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
import { FileSystem } from './core/file_system';
|
||||
/**
|
||||
* We use typedoc in 'file' mode to avoid many issues.
|
||||
* Unfortunately, it does not process export statements properly in some circumstances.
|
||||
* Here, we redefine the main BrowserFS object for documentation purposes.
|
||||
*/
|
||||
import { FileSystem as Backends, BFSRequire } from './index';
|
||||
/**
|
||||
* BrowserFS's main interface.
|
||||
*
|
||||
* In the browser, this is exposed as the `BrowserFS` global.
|
||||
*
|
||||
* In node, this is the object you receive when you `require('browserfs')`.
|
||||
*/
|
||||
export interface BrowserFS {
|
||||
/**
|
||||
* Exposes all of the file system backends available in BrowserFS.
|
||||
*/
|
||||
FileSystem: typeof Backends;
|
||||
/**
|
||||
* Emulates Node's `require()` function for filesystem-related modules (`'fs'`, `'path'`, `'buffer'`, etc).
|
||||
*/
|
||||
BFSRequire: typeof BFSRequire;
|
||||
/**
|
||||
* You must call this function with a properly-instantiated root file system
|
||||
* before using any file system API method.
|
||||
* @param rootFS The root filesystem to use for the
|
||||
* entire BrowserFS file system.
|
||||
*/
|
||||
initialize(rootFS: FileSystem): void;
|
||||
/**
|
||||
* Installs BrowserFS onto the given object.
|
||||
* We recommend that you run install with the 'window' object to make things
|
||||
* global, as in Node.
|
||||
*
|
||||
* Properties installed:
|
||||
*
|
||||
* * Buffer
|
||||
* * process
|
||||
* * require (we monkey-patch it)
|
||||
*
|
||||
* This allows you to write code as if you were running inside Node.
|
||||
* @param obj The object to install things onto (e.g. window)
|
||||
*/
|
||||
install(obj: any): void;
|
||||
}
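A hedged sketch tying the interface above together. `rootFS` stands for an already-constructed file system instance (construction is documented with the individual backends), and the example assumes the browser build where `BrowserFS` is a global.

```javascript
BrowserFS.install(window);     // exposes Buffer, process and a patched require
BrowserFS.initialize(rootFS);  // must run before any fs API call

var fs = BrowserFS.BFSRequire('fs');
var path = BrowserFS.BFSRequire('path');

fs.writeFile(path.join('/', 'hello.txt'), 'hi', function (err) {
  if (err) { return console.error(err); }
  console.log(fs.readFileSync('/hello.txt', 'utf8')); // 'hi'
});
```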
|
||||
3
sandpack-generated/static/browserfs11/node/typedoc.js
Normal file
@@ -0,0 +1,3 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
//# sourceMappingURL=typedoc.js.map
|
||||