commit 41ae7ff4bd (parent 68f4b60012)
1010 changed files with 38622 additions and 17071 deletions
node_modules/@discordjs/ws/dist/defaultWorker.d.ts (generated, vendored, normal file, 2 lines added)
@@ -0,0 +1,2 @@

export { }
node_modules/@discordjs/ws/dist/defaultWorker.js (generated, vendored, normal file, 982 lines added)
@@ -0,0 +1,982 @@
"use strict";
|
||||
var __create = Object.create;
|
||||
var __defProp = Object.defineProperty;
|
||||
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
||||
var __getOwnPropNames = Object.getOwnPropertyNames;
|
||||
var __getProtoOf = Object.getPrototypeOf;
|
||||
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
||||
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
|
||||
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
|
||||
var __copyProps = (to, from, except, desc) => {
|
||||
if (from && typeof from === "object" || typeof from === "function") {
|
||||
for (let key of __getOwnPropNames(from))
|
||||
if (!__hasOwnProp.call(to, key) && key !== except)
|
||||
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
||||
}
|
||||
return to;
|
||||
};
|
||||
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
||||
// If the importer is in node compatibility mode or this is not an ESM
|
||||
// file that has been converted to a CommonJS file using a Babel-
|
||||
// compatible transform (i.e. "__esModule" has not been set), then set
|
||||
// "default" to the CommonJS "module.exports" for node compatibility.
|
||||
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
||||
mod
|
||||
));
|
||||
var __publicField = (obj, key, value) => {
|
||||
__defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
|
||||
return value;
|
||||
};
|
||||
|
||||
// src/utils/WorkerBootstrapper.ts
|
||||
var import_node_worker_threads3 = require("worker_threads");
|
||||
var import_collection7 = require("@discordjs/collection");
|
||||
|
||||
// src/strategies/context/WorkerContextFetchingStrategy.ts
|
||||
var import_node_worker_threads2 = require("worker_threads");
|
||||
var import_collection2 = require("@discordjs/collection");
|
||||
|
||||
// src/strategies/sharding/WorkerShardingStrategy.ts
|
||||
var import_node_events = require("events");
|
||||
var import_node_path = require("path");
|
||||
var import_node_worker_threads = require("worker_threads");
|
||||
var import_collection = require("@discordjs/collection");
|
||||
|
||||
// src/strategies/context/IContextFetchingStrategy.ts
|
||||
async function managerToFetchingStrategyOptions(manager) {
|
||||
const {
|
||||
buildIdentifyThrottler,
|
||||
buildStrategy,
|
||||
retrieveSessionInfo,
|
||||
updateSessionInfo,
|
||||
shardCount,
|
||||
shardIds,
|
||||
rest,
|
||||
...managerOptions
|
||||
} = manager.options;
|
||||
return {
|
||||
...managerOptions,
|
||||
gatewayInformation: await manager.fetchGatewayInformation(),
|
||||
shardCount: await manager.getShardCount()
|
||||
};
|
||||
}
|
||||
__name(managerToFetchingStrategyOptions, "managerToFetchingStrategyOptions");
|
||||
|
||||
// src/strategies/context/WorkerContextFetchingStrategy.ts
|
||||
var WorkerContextFetchingStrategy = class {
|
||||
constructor(options) {
|
||||
this.options = options;
|
||||
if (import_node_worker_threads2.isMainThread) {
|
||||
throw new Error("Cannot instantiate WorkerContextFetchingStrategy on the main thread");
|
||||
}
|
||||
import_node_worker_threads2.parentPort.on("message", (payload) => {
|
||||
if (payload.op === 3 /* SessionInfoResponse */) {
|
||||
this.sessionPromises.get(payload.nonce)?.(payload.session);
|
||||
this.sessionPromises.delete(payload.nonce);
|
||||
}
|
||||
if (payload.op === 4 /* ShardIdentifyResponse */) {
|
||||
const promise = this.waitForIdentifyPromises.get(payload.nonce);
|
||||
if (payload.ok) {
|
||||
promise?.resolve();
|
||||
} else {
|
||||
promise?.reject();
|
||||
}
|
||||
this.waitForIdentifyPromises.delete(payload.nonce);
|
||||
}
|
||||
});
|
||||
}
|
||||
sessionPromises = new import_collection2.Collection();
|
||||
waitForIdentifyPromises = new import_collection2.Collection();
|
||||
async retrieveSessionInfo(shardId) {
|
||||
const nonce = Math.random();
|
||||
const payload = {
|
||||
op: 3 /* RetrieveSessionInfo */,
|
||||
shardId,
|
||||
nonce
|
||||
};
|
||||
const promise = new Promise((resolve2) => this.sessionPromises.set(nonce, resolve2));
|
||||
import_node_worker_threads2.parentPort.postMessage(payload);
|
||||
return promise;
|
||||
}
|
||||
updateSessionInfo(shardId, sessionInfo) {
|
||||
const payload = {
|
||||
op: 4 /* UpdateSessionInfo */,
|
||||
shardId,
|
||||
session: sessionInfo
|
||||
};
|
||||
import_node_worker_threads2.parentPort.postMessage(payload);
|
||||
}
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const nonce = Math.random();
|
||||
const payload = {
|
||||
op: 5 /* WaitForIdentify */,
|
||||
nonce,
|
||||
shardId
|
||||
};
|
||||
const promise = new Promise(
|
||||
(resolve2, reject) => (
|
||||
// eslint-disable-next-line no-promise-executor-return
|
||||
this.waitForIdentifyPromises.set(nonce, { resolve: resolve2, reject })
|
||||
)
|
||||
);
|
||||
import_node_worker_threads2.parentPort.postMessage(payload);
|
||||
const listener = /* @__PURE__ */ __name(() => {
|
||||
const payload2 = {
|
||||
op: 8 /* CancelIdentify */,
|
||||
nonce
|
||||
};
|
||||
import_node_worker_threads2.parentPort.postMessage(payload2);
|
||||
}, "listener");
|
||||
signal.addEventListener("abort", listener);
|
||||
try {
|
||||
await promise;
|
||||
} finally {
|
||||
signal.removeEventListener("abort", listener);
|
||||
}
|
||||
}
|
||||
};
|
||||
__name(WorkerContextFetchingStrategy, "WorkerContextFetchingStrategy");
|
||||
|
||||
// src/ws/WebSocketShard.ts
|
||||
var import_node_buffer = require("buffer");
|
||||
var import_node_events2 = require("events");
|
||||
var import_node_timers = require("timers");
|
||||
var import_promises2 = require("timers/promises");
|
||||
var import_node_url = require("url");
|
||||
var import_node_util = require("util");
|
||||
var import_node_zlib = require("zlib");
|
||||
var import_collection6 = require("@discordjs/collection");
|
||||
var import_util2 = require("@discordjs/util");
|
||||
var import_async_queue2 = require("@sapphire/async-queue");
|
||||
var import_async_event_emitter = require("@vladfrangu/async_event_emitter");
|
||||
var import_v102 = require("discord-api-types/v10");
|
||||
var import_ws = require("ws");
|
||||
|
||||
// src/utils/constants.ts
|
||||
var import_node_process = __toESM(require("process"));
|
||||
var import_collection5 = require("@discordjs/collection");
|
||||
var import_util = require("@discordjs/util");
|
||||
var import_v10 = require("discord-api-types/v10");
|
||||
|
||||
// src/strategies/sharding/SimpleShardingStrategy.ts
|
||||
var import_collection3 = require("@discordjs/collection");
|
||||
|
||||
// src/strategies/context/SimpleContextFetchingStrategy.ts
|
||||
var _SimpleContextFetchingStrategy = class {
|
||||
constructor(manager, options) {
|
||||
this.manager = manager;
|
||||
this.options = options;
|
||||
}
|
||||
static async ensureThrottler(manager) {
|
||||
const throttler = _SimpleContextFetchingStrategy.throttlerCache.get(manager);
|
||||
if (throttler) {
|
||||
return throttler;
|
||||
}
|
||||
const newThrottler = await manager.options.buildIdentifyThrottler(manager);
|
||||
_SimpleContextFetchingStrategy.throttlerCache.set(manager, newThrottler);
|
||||
return newThrottler;
|
||||
}
|
||||
async retrieveSessionInfo(shardId) {
|
||||
return this.manager.options.retrieveSessionInfo(shardId);
|
||||
}
|
||||
updateSessionInfo(shardId, sessionInfo) {
|
||||
return this.manager.options.updateSessionInfo(shardId, sessionInfo);
|
||||
}
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const throttler = await _SimpleContextFetchingStrategy.ensureThrottler(this.manager);
|
||||
await throttler.waitForIdentify(shardId, signal);
|
||||
}
|
||||
};
|
||||
var SimpleContextFetchingStrategy = _SimpleContextFetchingStrategy;
|
||||
__name(SimpleContextFetchingStrategy, "SimpleContextFetchingStrategy");
|
||||
// This strategy assumes every shard is running under the same process - therefore we need a single
|
||||
// IdentifyThrottler per manager.
|
||||
__publicField(SimpleContextFetchingStrategy, "throttlerCache", /* @__PURE__ */ new WeakMap());
|
||||
|
||||
// src/strategies/sharding/SimpleShardingStrategy.ts
|
||||
var SimpleShardingStrategy = class {
|
||||
manager;
|
||||
shards = new import_collection3.Collection();
|
||||
constructor(manager) {
|
||||
this.manager = manager;
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.spawn}
|
||||
*/
|
||||
async spawn(shardIds) {
|
||||
const strategyOptions = await managerToFetchingStrategyOptions(this.manager);
|
||||
for (const shardId of shardIds) {
|
||||
const strategy = new SimpleContextFetchingStrategy(this.manager, strategyOptions);
|
||||
const shard = new WebSocketShard(strategy, shardId);
|
||||
for (const event of Object.values(WebSocketShardEvents)) {
|
||||
shard.on(event, (payload) => this.manager.emit(event, { ...payload, shardId }));
|
||||
}
|
||||
this.shards.set(shardId, shard);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.connect}
|
||||
*/
|
||||
async connect() {
|
||||
const promises = [];
|
||||
for (const shard of this.shards.values()) {
|
||||
promises.push(shard.connect());
|
||||
}
|
||||
await Promise.all(promises);
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.destroy}
|
||||
*/
|
||||
async destroy(options) {
|
||||
const promises = [];
|
||||
for (const shard of this.shards.values()) {
|
||||
promises.push(shard.destroy(options));
|
||||
}
|
||||
await Promise.all(promises);
|
||||
this.shards.clear();
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.send}
|
||||
*/
|
||||
async send(shardId, payload) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} not found`);
|
||||
}
|
||||
return shard.send(payload);
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.fetchStatus}
|
||||
*/
|
||||
async fetchStatus() {
|
||||
return this.shards.mapValues((shard) => shard.status);
|
||||
}
|
||||
};
|
||||
__name(SimpleShardingStrategy, "SimpleShardingStrategy");
|
||||
|
||||
// src/throttling/SimpleIdentifyThrottler.ts
|
||||
var import_promises = require("timers/promises");
|
||||
var import_collection4 = require("@discordjs/collection");
|
||||
var import_async_queue = require("@sapphire/async-queue");
|
||||
var SimpleIdentifyThrottler = class {
|
||||
constructor(maxConcurrency) {
|
||||
this.maxConcurrency = maxConcurrency;
|
||||
}
|
||||
states = new import_collection4.Collection();
|
||||
/**
|
||||
* {@inheritDoc IIdentifyThrottler.waitForIdentify}
|
||||
*/
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const key = shardId % this.maxConcurrency;
|
||||
const state = this.states.ensure(key, () => {
|
||||
return {
|
||||
queue: new import_async_queue.AsyncQueue(),
|
||||
resetsAt: Number.POSITIVE_INFINITY
|
||||
};
|
||||
});
|
||||
await state.queue.wait({ signal });
|
||||
try {
|
||||
const diff = state.resetsAt - Date.now();
|
||||
if (diff <= 5e3) {
|
||||
const time = diff + Math.random() * 1500;
|
||||
await (0, import_promises.setTimeout)(time);
|
||||
}
|
||||
state.resetsAt = Date.now() + 5e3;
|
||||
} finally {
|
||||
state.queue.shift();
|
||||
}
|
||||
}
|
||||
};
|
||||
__name(SimpleIdentifyThrottler, "SimpleIdentifyThrottler");
|
||||
|
||||
// src/utils/constants.ts
|
||||
var DefaultDeviceProperty = `@discordjs/ws 0.8.3`;
|
||||
var getDefaultSessionStore = (0, import_util.lazy)(() => new import_collection5.Collection());
|
||||
var DefaultWebSocketManagerOptions = {
|
||||
async buildIdentifyThrottler(manager) {
|
||||
const info = await manager.fetchGatewayInformation();
|
||||
return new SimpleIdentifyThrottler(info.session_start_limit.max_concurrency);
|
||||
},
|
||||
buildStrategy: (manager) => new SimpleShardingStrategy(manager),
|
||||
shardCount: null,
|
||||
shardIds: null,
|
||||
largeThreshold: null,
|
||||
initialPresence: null,
|
||||
identifyProperties: {
|
||||
browser: DefaultDeviceProperty,
|
||||
device: DefaultDeviceProperty,
|
||||
os: import_node_process.default.platform
|
||||
},
|
||||
version: import_v10.APIVersion,
|
||||
encoding: "json" /* JSON */,
|
||||
compression: null,
|
||||
retrieveSessionInfo(shardId) {
|
||||
const store = getDefaultSessionStore();
|
||||
return store.get(shardId) ?? null;
|
||||
},
|
||||
updateSessionInfo(shardId, info) {
|
||||
const store = getDefaultSessionStore();
|
||||
if (info) {
|
||||
store.set(shardId, info);
|
||||
} else {
|
||||
store.delete(shardId);
|
||||
}
|
||||
},
|
||||
handshakeTimeout: 3e4,
|
||||
helloTimeout: 6e4,
|
||||
readyTimeout: 15e3
|
||||
};
|
||||
var ImportantGatewayOpcodes = /* @__PURE__ */ new Set([
|
||||
import_v10.GatewayOpcodes.Heartbeat,
|
||||
import_v10.GatewayOpcodes.Identify,
|
||||
import_v10.GatewayOpcodes.Resume
|
||||
]);
|
||||
function getInitialSendRateLimitState() {
|
||||
return {
|
||||
remaining: 120,
|
||||
resetAt: Date.now() + 6e4
|
||||
};
|
||||
}
|
||||
__name(getInitialSendRateLimitState, "getInitialSendRateLimitState");

// src/ws/WebSocketShard.ts
var getZlibSync = (0, import_util2.lazy)(async () => import("zlib-sync").then((mod) => mod.default).catch(() => null));
var WebSocketShardEvents = /* @__PURE__ */ ((WebSocketShardEvents2) => {
  WebSocketShardEvents2["Closed"] = "closed";
  WebSocketShardEvents2["Debug"] = "debug";
  WebSocketShardEvents2["Dispatch"] = "dispatch";
  WebSocketShardEvents2["Error"] = "error";
  WebSocketShardEvents2["HeartbeatComplete"] = "heartbeat";
  WebSocketShardEvents2["Hello"] = "hello";
  WebSocketShardEvents2["Ready"] = "ready";
  WebSocketShardEvents2["Resumed"] = "resumed";
  return WebSocketShardEvents2;
})(WebSocketShardEvents || {});
var WebSocketShardDestroyRecovery = /* @__PURE__ */ ((WebSocketShardDestroyRecovery2) => {
  WebSocketShardDestroyRecovery2[WebSocketShardDestroyRecovery2["Reconnect"] = 0] = "Reconnect";
  WebSocketShardDestroyRecovery2[WebSocketShardDestroyRecovery2["Resume"] = 1] = "Resume";
  return WebSocketShardDestroyRecovery2;
})(WebSocketShardDestroyRecovery || {});
var WebSocketShard = class extends import_async_event_emitter.AsyncEventEmitter {
|
||||
connection = null;
|
||||
useIdentifyCompress = false;
|
||||
inflate = null;
|
||||
textDecoder = new import_node_util.TextDecoder();
|
||||
replayedEvents = 0;
|
||||
isAck = true;
|
||||
sendRateLimitState = getInitialSendRateLimitState();
|
||||
initialHeartbeatTimeoutController = null;
|
||||
heartbeatInterval = null;
|
||||
lastHeartbeatAt = -1;
|
||||
// Indicates whether the shard has already resolved its original connect() call
|
||||
initialConnectResolved = false;
|
||||
// Indicates if we failed to connect to the ws url (ECONNREFUSED/ECONNRESET)
|
||||
failedToConnectDueToNetworkError = false;
|
||||
sendQueue = new import_async_queue2.AsyncQueue();
|
||||
timeoutAbortControllers = new import_collection6.Collection();
|
||||
strategy;
|
||||
id;
|
||||
#status = 0 /* Idle */;
|
||||
get status() {
|
||||
return this.#status;
|
||||
}
|
||||
constructor(strategy, id) {
|
||||
super();
|
||||
this.strategy = strategy;
|
||||
this.id = id;
|
||||
}
|
||||
async connect() {
|
||||
const promise = this.initialConnectResolved ? Promise.resolve() : (0, import_node_events2.once)(this, "ready" /* Ready */);
|
||||
void this.internalConnect();
|
||||
await promise;
|
||||
this.initialConnectResolved = true;
|
||||
}
|
||||
async internalConnect() {
|
||||
if (this.#status !== 0 /* Idle */) {
|
||||
throw new Error("Tried to connect a shard that wasn't idle");
|
||||
}
|
||||
const { version, encoding, compression } = this.strategy.options;
|
||||
const params = new import_node_url.URLSearchParams({ v: version, encoding });
|
||||
if (compression) {
|
||||
const zlib = await getZlibSync();
|
||||
if (zlib) {
|
||||
params.append("compress", compression);
|
||||
this.inflate = new zlib.Inflate({
|
||||
chunkSize: 65535,
|
||||
to: "string"
|
||||
});
|
||||
} else if (!this.useIdentifyCompress) {
|
||||
this.useIdentifyCompress = true;
|
||||
console.warn(
|
||||
"WebSocketShard: Compression is enabled but zlib-sync is not installed, falling back to identify compress"
|
||||
);
|
||||
}
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
const url = `${session?.resumeURL ?? this.strategy.options.gatewayInformation.url}?${params.toString()}`;
|
||||
this.debug([`Connecting to ${url}`]);
|
||||
const connection = new import_ws.WebSocket(url, { handshakeTimeout: this.strategy.options.handshakeTimeout ?? void 0 }).on("message", this.onMessage.bind(this)).on("error", this.onError.bind(this)).on("close", this.onClose.bind(this));
|
||||
connection.binaryType = "arraybuffer";
|
||||
this.connection = connection;
|
||||
this.#status = 1 /* Connecting */;
|
||||
this.sendRateLimitState = getInitialSendRateLimitState();
|
||||
const { ok } = await this.waitForEvent("hello" /* Hello */, this.strategy.options.helloTimeout);
|
||||
if (!ok) {
|
||||
return;
|
||||
}
|
||||
if (session?.shardCount === this.strategy.options.shardCount) {
|
||||
await this.resume(session);
|
||||
} else {
|
||||
await this.identify();
|
||||
}
|
||||
}
|
||||
async destroy(options = {}) {
|
||||
if (this.#status === 0 /* Idle */) {
|
||||
this.debug(["Tried to destroy a shard that was idle"]);
|
||||
return;
|
||||
}
|
||||
if (!options.code) {
|
||||
options.code = options.recover === 1 /* Resume */ ? 4200 /* Resuming */ : 1e3 /* Normal */;
|
||||
}
|
||||
this.debug([
|
||||
"Destroying shard",
|
||||
`Reason: ${options.reason ?? "none"}`,
|
||||
`Code: ${options.code}`,
|
||||
`Recover: ${options.recover === void 0 ? "none" : WebSocketShardDestroyRecovery[options.recover]}`
|
||||
]);
|
||||
this.isAck = true;
|
||||
if (this.heartbeatInterval) {
|
||||
(0, import_node_timers.clearInterval)(this.heartbeatInterval);
|
||||
}
|
||||
if (this.initialHeartbeatTimeoutController) {
|
||||
this.initialHeartbeatTimeoutController.abort();
|
||||
this.initialHeartbeatTimeoutController = null;
|
||||
}
|
||||
this.lastHeartbeatAt = -1;
|
||||
for (const controller of this.timeoutAbortControllers.values()) {
|
||||
controller.abort();
|
||||
}
|
||||
this.timeoutAbortControllers.clear();
|
||||
this.failedToConnectDueToNetworkError = false;
|
||||
if (options.recover !== 1 /* Resume */) {
|
||||
await this.strategy.updateSessionInfo(this.id, null);
|
||||
}
|
||||
if (this.connection) {
|
||||
this.connection.removeAllListeners("message");
|
||||
this.connection.removeAllListeners("close");
|
||||
const shouldClose = this.connection.readyState === import_ws.WebSocket.OPEN;
|
||||
this.debug([
|
||||
"Connection status during destroy",
|
||||
`Needs closing: ${shouldClose}`,
|
||||
`Ready state: ${this.connection.readyState}`
|
||||
]);
|
||||
if (shouldClose) {
|
||||
this.connection.close(options.code, options.reason);
|
||||
await (0, import_node_events2.once)(this.connection, "close");
|
||||
this.emit("closed" /* Closed */, { code: options.code });
|
||||
}
|
||||
this.connection.removeAllListeners("error");
|
||||
} else {
|
||||
this.debug(["Destroying a shard that has no connection; please open an issue on GitHub"]);
|
||||
}
|
||||
this.#status = 0 /* Idle */;
|
||||
if (options.recover !== void 0) {
|
||||
await (0, import_promises2.setTimeout)(500);
|
||||
return this.internalConnect();
|
||||
}
|
||||
}
|
||||
async waitForEvent(event, timeoutDuration) {
|
||||
this.debug([`Waiting for event ${event} ${timeoutDuration ? `for ${timeoutDuration}ms` : "indefinitely"}`]);
|
||||
const timeoutController = new AbortController();
|
||||
const timeout = timeoutDuration ? (0, import_node_timers.setTimeout)(() => timeoutController.abort(), timeoutDuration).unref() : null;
|
||||
this.timeoutAbortControllers.set(event, timeoutController);
|
||||
const closeController = new AbortController();
|
||||
try {
|
||||
const closed = await Promise.race([
|
||||
(0, import_node_events2.once)(this, event, { signal: timeoutController.signal }).then(() => false),
|
||||
(0, import_node_events2.once)(this, "closed" /* Closed */, { signal: closeController.signal }).then(() => true)
|
||||
]);
|
||||
return { ok: !closed };
|
||||
} catch {
|
||||
void this.destroy({
|
||||
code: 1e3 /* Normal */,
|
||||
reason: "Something timed out or went wrong while waiting for an event",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
return { ok: false };
|
||||
} finally {
|
||||
if (timeout) {
|
||||
(0, import_node_timers.clearTimeout)(timeout);
|
||||
}
|
||||
this.timeoutAbortControllers.delete(event);
|
||||
if (!closeController.signal.aborted) {
|
||||
closeController.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
async send(payload) {
|
||||
if (!this.connection) {
|
||||
throw new Error("WebSocketShard wasn't connected");
|
||||
}
|
||||
if (this.#status !== 3 /* Ready */ && !ImportantGatewayOpcodes.has(payload.op)) {
|
||||
this.debug(["Tried to send a non-crucial payload before the shard was ready, waiting"]);
|
||||
try {
|
||||
await (0, import_node_events2.once)(this, "ready" /* Ready */);
|
||||
} catch {
|
||||
return this.send(payload);
|
||||
}
|
||||
}
|
||||
await this.sendQueue.wait();
|
||||
if (--this.sendRateLimitState.remaining <= 0) {
|
||||
const now = Date.now();
|
||||
if (this.sendRateLimitState.resetAt > now) {
|
||||
const sleepFor = this.sendRateLimitState.resetAt - now;
|
||||
this.debug([`Was about to hit the send rate limit, sleeping for ${sleepFor}ms`]);
|
||||
const controller = new AbortController();
|
||||
const interrupted = await Promise.race([
|
||||
(0, import_promises2.setTimeout)(sleepFor).then(() => false),
|
||||
(0, import_node_events2.once)(this, "closed" /* Closed */, { signal: controller.signal }).then(() => true)
|
||||
]);
|
||||
if (interrupted) {
|
||||
this.debug(["Connection closed while waiting for the send rate limit to reset, re-queueing payload"]);
|
||||
this.sendQueue.shift();
|
||||
return this.send(payload);
|
||||
}
|
||||
controller.abort();
|
||||
}
|
||||
this.sendRateLimitState = getInitialSendRateLimitState();
|
||||
}
|
||||
this.sendQueue.shift();
|
||||
this.connection.send(JSON.stringify(payload));
|
||||
}
|
||||
async identify() {
|
||||
this.debug(["Waiting for identify throttle"]);
|
||||
const controller = new AbortController();
|
||||
const closeHandler = /* @__PURE__ */ __name(() => {
|
||||
controller.abort();
|
||||
}, "closeHandler");
|
||||
this.on("closed" /* Closed */, closeHandler);
|
||||
try {
|
||||
await this.strategy.waitForIdentify(this.id, controller.signal);
|
||||
} catch {
|
||||
this.debug(["Was waiting for an identify, but the shard closed in the meantime"]);
|
||||
return;
|
||||
} finally {
|
||||
this.off("closed" /* Closed */, closeHandler);
|
||||
}
|
||||
this.debug([
|
||||
"Identifying",
|
||||
`shard id: ${this.id.toString()}`,
|
||||
`shard count: ${this.strategy.options.shardCount}`,
|
||||
`intents: ${this.strategy.options.intents}`,
|
||||
`compression: ${this.inflate ? "zlib-stream" : this.useIdentifyCompress ? "identify" : "none"}`
|
||||
]);
|
||||
const d = {
|
||||
token: this.strategy.options.token,
|
||||
properties: this.strategy.options.identifyProperties,
|
||||
intents: this.strategy.options.intents,
|
||||
compress: this.useIdentifyCompress,
|
||||
shard: [this.id, this.strategy.options.shardCount]
|
||||
};
|
||||
if (this.strategy.options.largeThreshold) {
|
||||
d.large_threshold = this.strategy.options.largeThreshold;
|
||||
}
|
||||
if (this.strategy.options.initialPresence) {
|
||||
d.presence = this.strategy.options.initialPresence;
|
||||
}
|
||||
await this.send({
|
||||
op: import_v102.GatewayOpcodes.Identify,
|
||||
d
|
||||
});
|
||||
await this.waitForEvent("ready" /* Ready */, this.strategy.options.readyTimeout);
|
||||
}
|
||||
async resume(session) {
|
||||
this.debug([
|
||||
"Resuming session",
|
||||
`resume url: ${session.resumeURL}`,
|
||||
`sequence: ${session.sequence}`,
|
||||
`shard id: ${this.id.toString()}`
|
||||
]);
|
||||
this.#status = 2 /* Resuming */;
|
||||
this.replayedEvents = 0;
|
||||
return this.send({
|
||||
op: import_v102.GatewayOpcodes.Resume,
|
||||
d: {
|
||||
token: this.strategy.options.token,
|
||||
seq: session.sequence,
|
||||
session_id: session.sessionId
|
||||
}
|
||||
});
|
||||
}
|
||||
async heartbeat(requested = false) {
|
||||
if (!this.isAck && !requested) {
|
||||
return this.destroy({ reason: "Zombie connection", recover: 1 /* Resume */ });
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
await this.send({
|
||||
op: import_v102.GatewayOpcodes.Heartbeat,
|
||||
d: session?.sequence ?? null
|
||||
});
|
||||
this.lastHeartbeatAt = Date.now();
|
||||
this.isAck = false;
|
||||
}
|
||||
async unpackMessage(data, isBinary) {
|
||||
const decompressable = new Uint8Array(data);
|
||||
if (!isBinary) {
|
||||
return JSON.parse(this.textDecoder.decode(decompressable));
|
||||
}
|
||||
if (this.useIdentifyCompress) {
|
||||
return new Promise((resolve2, reject) => {
|
||||
(0, import_node_zlib.inflate)(decompressable, { chunkSize: 65535 }, (err, result) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
resolve2(JSON.parse(this.textDecoder.decode(result)));
|
||||
});
|
||||
});
|
||||
}
|
||||
if (this.inflate) {
|
||||
const l = decompressable.length;
|
||||
const flush = l >= 4 && decompressable[l - 4] === 0 && decompressable[l - 3] === 0 && decompressable[l - 2] === 255 && decompressable[l - 1] === 255;
|
||||
const zlib = await getZlibSync();
|
||||
this.inflate.push(import_node_buffer.Buffer.from(decompressable), flush ? zlib.Z_SYNC_FLUSH : zlib.Z_NO_FLUSH);
|
||||
if (this.inflate.err) {
|
||||
this.emit("error" /* Error */, {
|
||||
error: new Error(`${this.inflate.err}${this.inflate.msg ? `: ${this.inflate.msg}` : ""}`)
|
||||
});
|
||||
}
|
||||
if (!flush) {
|
||||
return null;
|
||||
}
|
||||
const { result } = this.inflate;
|
||||
if (!result) {
|
||||
return null;
|
||||
}
|
||||
return JSON.parse(typeof result === "string" ? result : this.textDecoder.decode(result));
|
||||
}
|
||||
this.debug([
|
||||
"Received a message we were unable to decompress",
|
||||
`isBinary: ${isBinary.toString()}`,
|
||||
`useIdentifyCompress: ${this.useIdentifyCompress.toString()}`,
|
||||
`inflate: ${Boolean(this.inflate).toString()}`
|
||||
]);
|
||||
return null;
|
||||
}
|
||||
async onMessage(data, isBinary) {
|
||||
const payload = await this.unpackMessage(data, isBinary);
|
||||
if (!payload) {
|
||||
return;
|
||||
}
|
||||
switch (payload.op) {
|
||||
case import_v102.GatewayOpcodes.Dispatch: {
|
||||
if (this.#status === 2 /* Resuming */) {
|
||||
this.replayedEvents++;
|
||||
}
|
||||
switch (payload.t) {
|
||||
case import_v102.GatewayDispatchEvents.Ready: {
|
||||
this.#status = 3 /* Ready */;
|
||||
const session2 = {
|
||||
sequence: payload.s,
|
||||
sessionId: payload.d.session_id,
|
||||
shardId: this.id,
|
||||
shardCount: this.strategy.options.shardCount,
|
||||
resumeURL: payload.d.resume_gateway_url
|
||||
};
|
||||
await this.strategy.updateSessionInfo(this.id, session2);
|
||||
this.emit("ready" /* Ready */, { data: payload.d });
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayDispatchEvents.Resumed: {
|
||||
this.#status = 3 /* Ready */;
|
||||
this.debug([`Resumed and replayed ${this.replayedEvents} events`]);
|
||||
this.emit("resumed" /* Resumed */);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
if (session) {
|
||||
if (payload.s > session.sequence) {
|
||||
await this.strategy.updateSessionInfo(this.id, { ...session, sequence: payload.s });
|
||||
}
|
||||
} else {
|
||||
this.debug([
|
||||
`Received a ${payload.t} event but no session is available. Session information cannot be re-constructed in this state without a full reconnect`
|
||||
]);
|
||||
}
|
||||
this.emit("dispatch" /* Dispatch */, { data: payload });
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayOpcodes.Heartbeat: {
|
||||
await this.heartbeat(true);
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayOpcodes.Reconnect: {
|
||||
await this.destroy({
|
||||
reason: "Told to reconnect by Discord",
|
||||
recover: 1 /* Resume */
|
||||
});
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayOpcodes.InvalidSession: {
|
||||
this.debug([`Invalid session; will attempt to resume: ${payload.d.toString()}`]);
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
if (payload.d && session) {
|
||||
await this.resume(session);
|
||||
} else {
|
||||
await this.destroy({
|
||||
reason: "Invalid session",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayOpcodes.Hello: {
|
||||
this.emit("hello" /* Hello */);
|
||||
const jitter = Math.random();
|
||||
const firstWait = Math.floor(payload.d.heartbeat_interval * jitter);
|
||||
this.debug([`Preparing first heartbeat of the connection with a jitter of ${jitter}; waiting ${firstWait}ms`]);
|
||||
try {
|
||||
const controller = new AbortController();
|
||||
this.initialHeartbeatTimeoutController = controller;
|
||||
await (0, import_promises2.setTimeout)(firstWait, void 0, { signal: controller.signal });
|
||||
} catch {
|
||||
this.debug(["Cancelled initial heartbeat due to #destroy being called"]);
|
||||
return;
|
||||
} finally {
|
||||
this.initialHeartbeatTimeoutController = null;
|
||||
}
|
||||
await this.heartbeat();
|
||||
this.debug([`First heartbeat sent, starting to beat every ${payload.d.heartbeat_interval}ms`]);
|
||||
this.heartbeatInterval = (0, import_node_timers.setInterval)(() => void this.heartbeat(), payload.d.heartbeat_interval);
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayOpcodes.HeartbeatAck: {
|
||||
this.isAck = true;
|
||||
const ackAt = Date.now();
|
||||
this.emit("heartbeat" /* HeartbeatComplete */, {
|
||||
ackAt,
|
||||
heartbeatAt: this.lastHeartbeatAt,
|
||||
latency: ackAt - this.lastHeartbeatAt
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
onError(error) {
|
||||
if ("code" in error && ["ECONNRESET", "ECONNREFUSED"].includes(error.code)) {
|
||||
this.debug(["Failed to connect to the gateway URL specified due to a network error"]);
|
||||
this.failedToConnectDueToNetworkError = true;
|
||||
return;
|
||||
}
|
||||
this.emit("error" /* Error */, { error });
|
||||
}
|
||||
async onClose(code) {
|
||||
this.emit("closed" /* Closed */, { code });
|
||||
switch (code) {
|
||||
case 1e3 /* Normal */: {
|
||||
return this.destroy({
|
||||
code,
|
||||
reason: "Got disconnected by Discord",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
}
|
||||
case 4200 /* Resuming */: {
|
||||
break;
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.UnknownError: {
|
||||
this.debug([`An unknown error occurred: ${code}`]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.UnknownOpcode: {
|
||||
this.debug(["An invalid opcode was sent to Discord."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.DecodeError: {
|
||||
this.debug(["An invalid payload was sent to Discord."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.NotAuthenticated: {
|
||||
this.debug(["A request was somehow sent before the identify/resume payload."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.AuthenticationFailed: {
|
||||
throw new Error("Authentication failed");
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.AlreadyAuthenticated: {
|
||||
this.debug(["More than one auth payload was sent."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.InvalidSeq: {
|
||||
this.debug(["An invalid sequence was sent."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.RateLimited: {
|
||||
this.debug(["The WebSocket rate limit has been hit, this should never happen"]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.SessionTimedOut: {
|
||||
this.debug(["Session timed out."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.InvalidShard: {
|
||||
throw new Error("Invalid shard");
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.ShardingRequired: {
|
||||
throw new Error("Sharding is required");
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.InvalidAPIVersion: {
|
||||
throw new Error("Used an invalid API version");
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.InvalidIntents: {
|
||||
throw new Error("Used invalid intents");
|
||||
}
|
||||
case import_v102.GatewayCloseCodes.DisallowedIntents: {
|
||||
throw new Error("Used disallowed intents");
|
||||
}
|
||||
default: {
|
||||
this.debug([
|
||||
`The gateway closed with an unexpected code ${code}, attempting to ${this.failedToConnectDueToNetworkError ? "reconnect" : "resume"}.`
|
||||
]);
|
||||
return this.destroy({
|
||||
code,
|
||||
recover: this.failedToConnectDueToNetworkError ? 0 /* Reconnect */ : 1 /* Resume */
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
debug(messages) {
|
||||
const message = `${messages[0]}${messages.length > 1 ? `
|
||||
${messages.slice(1).map((m) => ` ${m}`).join("\n")}` : ""}`;
|
||||
this.emit("debug" /* Debug */, { message });
|
||||
}
|
||||
};
|
||||
__name(WebSocketShard, "WebSocketShard");
|
||||
|
||||
// src/utils/WorkerBootstrapper.ts
|
||||
var WorkerBootstrapper = class {
|
||||
/**
|
||||
* The data passed to the worker thread
|
||||
*/
|
||||
data = import_node_worker_threads3.workerData;
|
||||
/**
|
||||
* The shards that are managed by this worker
|
||||
*/
|
||||
shards = new import_collection7.Collection();
|
||||
constructor() {
|
||||
if (import_node_worker_threads3.isMainThread) {
|
||||
throw new Error("Expected WorkerBootstrap to not be used within the main thread");
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Helper method to initiate a shard's connection process
|
||||
*/
|
||||
async connect(shardId) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} does not exist`);
|
||||
}
|
||||
await shard.connect();
|
||||
}
|
||||
/**
|
||||
* Helper method to destroy a shard
|
||||
*/
|
||||
async destroy(shardId, options) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} does not exist`);
|
||||
}
|
||||
await shard.destroy(options);
|
||||
}
|
||||
/**
|
||||
* Helper method to attach event listeners to the parentPort
|
||||
*/
|
||||
setupThreadEvents() {
|
||||
import_node_worker_threads3.parentPort.on("messageerror", (err) => {
|
||||
throw err;
|
||||
}).on("message", async (payload) => {
|
||||
switch (payload.op) {
|
||||
case 0 /* Connect */: {
|
||||
await this.connect(payload.shardId);
|
||||
const response = {
|
||||
op: 0 /* Connected */,
|
||||
shardId: payload.shardId
|
||||
};
|
||||
import_node_worker_threads3.parentPort.postMessage(response);
|
||||
break;
|
||||
}
|
||||
case 1 /* Destroy */: {
|
||||
await this.destroy(payload.shardId, payload.options);
|
||||
const response = {
|
||||
op: 1 /* Destroyed */,
|
||||
shardId: payload.shardId
|
||||
};
|
||||
import_node_worker_threads3.parentPort.postMessage(response);
|
||||
break;
|
||||
}
|
||||
case 2 /* Send */: {
|
||||
const shard = this.shards.get(payload.shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${payload.shardId} does not exist`);
|
||||
}
|
||||
await shard.send(payload.payload);
|
||||
break;
|
||||
}
|
||||
case 3 /* SessionInfoResponse */: {
|
||||
break;
|
||||
}
|
||||
case 4 /* ShardIdentifyResponse */: {
|
||||
break;
|
||||
}
|
||||
case 5 /* FetchStatus */: {
|
||||
const shard = this.shards.get(payload.shardId);
|
||||
if (!shard) {
|
||||
throw new Error(`Shard ${payload.shardId} does not exist`);
|
||||
}
|
||||
const response = {
|
||||
op: 6 /* FetchStatusResponse */,
|
||||
status: shard.status,
|
||||
nonce: payload.nonce
|
||||
};
|
||||
import_node_worker_threads3.parentPort.postMessage(response);
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Bootstraps the worker thread with the provided options
|
||||
*/
|
||||
async bootstrap(options = {}) {
|
||||
for (const shardId of this.data.shardIds) {
|
||||
const shard = new WebSocketShard(new WorkerContextFetchingStrategy(this.data), shardId);
|
||||
for (const event of options.forwardEvents ?? Object.values(WebSocketShardEvents)) {
|
||||
shard.on(event, (data) => {
|
||||
const payload = {
|
||||
op: 2 /* Event */,
|
||||
event,
|
||||
data,
|
||||
shardId
|
||||
};
|
||||
import_node_worker_threads3.parentPort.postMessage(payload);
|
||||
});
|
||||
}
|
||||
await options.shardCallback?.(shard);
|
||||
this.shards.set(shardId, shard);
|
||||
}
|
||||
this.setupThreadEvents();
|
||||
const message = {
|
||||
op: 7 /* WorkerReady */
|
||||
};
|
||||
import_node_worker_threads3.parentPort.postMessage(message);
|
||||
}
|
||||
};
__name(WorkerBootstrapper, "WorkerBootstrapper");

// src/strategies/sharding/defaultWorker.ts
var bootstrapper = new WorkerBootstrapper();
void bootstrapper.bootstrap();
//# sourceMappingURL=defaultWorker.js.map
node_modules/@discordjs/ws/dist/defaultWorker.js.map (generated, vendored, normal file, 1 line added)
File diff suppressed because one or more lines are too long
node_modules/@discordjs/ws/dist/defaultWorker.mjs (generated, vendored, normal file, 964 lines added)
@@ -0,0 +1,964 @@
var __defProp = Object.defineProperty;
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
var __publicField = (obj, key, value) => {
  __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
  return value;
};

// src/utils/WorkerBootstrapper.ts
import { isMainThread as isMainThread2, parentPort as parentPort2, workerData } from "node:worker_threads";
import { Collection as Collection7 } from "@discordjs/collection";

// src/strategies/context/WorkerContextFetchingStrategy.ts
import { isMainThread, parentPort } from "node:worker_threads";
import { Collection as Collection2 } from "@discordjs/collection";

// src/strategies/sharding/WorkerShardingStrategy.ts
import { once } from "node:events";
import { join, isAbsolute, resolve } from "node:path";
import { Worker } from "node:worker_threads";
import { Collection } from "@discordjs/collection";

// src/strategies/context/IContextFetchingStrategy.ts
|
||||
async function managerToFetchingStrategyOptions(manager) {
|
||||
const {
|
||||
buildIdentifyThrottler,
|
||||
buildStrategy,
|
||||
retrieveSessionInfo,
|
||||
updateSessionInfo,
|
||||
shardCount,
|
||||
shardIds,
|
||||
rest,
|
||||
...managerOptions
|
||||
} = manager.options;
|
||||
return {
|
||||
...managerOptions,
|
||||
gatewayInformation: await manager.fetchGatewayInformation(),
|
||||
shardCount: await manager.getShardCount()
|
||||
};
|
||||
}
|
||||
__name(managerToFetchingStrategyOptions, "managerToFetchingStrategyOptions");
|
||||
|
||||
// src/strategies/context/WorkerContextFetchingStrategy.ts
|
||||
var WorkerContextFetchingStrategy = class {
|
||||
constructor(options) {
|
||||
this.options = options;
|
||||
if (isMainThread) {
|
||||
throw new Error("Cannot instantiate WorkerContextFetchingStrategy on the main thread");
|
||||
}
|
||||
parentPort.on("message", (payload) => {
|
||||
if (payload.op === 3 /* SessionInfoResponse */) {
|
||||
this.sessionPromises.get(payload.nonce)?.(payload.session);
|
||||
this.sessionPromises.delete(payload.nonce);
|
||||
}
|
||||
if (payload.op === 4 /* ShardIdentifyResponse */) {
|
||||
const promise = this.waitForIdentifyPromises.get(payload.nonce);
|
||||
if (payload.ok) {
|
||||
promise?.resolve();
|
||||
} else {
|
||||
promise?.reject();
|
||||
}
|
||||
this.waitForIdentifyPromises.delete(payload.nonce);
|
||||
}
|
||||
});
|
||||
}
|
||||
sessionPromises = new Collection2();
|
||||
waitForIdentifyPromises = new Collection2();
|
||||
async retrieveSessionInfo(shardId) {
|
||||
const nonce = Math.random();
|
||||
const payload = {
|
||||
op: 3 /* RetrieveSessionInfo */,
|
||||
shardId,
|
||||
nonce
|
||||
};
|
||||
const promise = new Promise((resolve2) => this.sessionPromises.set(nonce, resolve2));
|
||||
parentPort.postMessage(payload);
|
||||
return promise;
|
||||
}
|
||||
updateSessionInfo(shardId, sessionInfo) {
|
||||
const payload = {
|
||||
op: 4 /* UpdateSessionInfo */,
|
||||
shardId,
|
||||
session: sessionInfo
|
||||
};
|
||||
parentPort.postMessage(payload);
|
||||
}
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const nonce = Math.random();
|
||||
const payload = {
|
||||
op: 5 /* WaitForIdentify */,
|
||||
nonce,
|
||||
shardId
|
||||
};
|
||||
const promise = new Promise(
|
||||
(resolve2, reject) => (
|
||||
// eslint-disable-next-line no-promise-executor-return
|
||||
this.waitForIdentifyPromises.set(nonce, { resolve: resolve2, reject })
|
||||
)
|
||||
);
|
||||
parentPort.postMessage(payload);
|
||||
const listener = /* @__PURE__ */ __name(() => {
|
||||
const payload2 = {
|
||||
op: 8 /* CancelIdentify */,
|
||||
nonce
|
||||
};
|
||||
parentPort.postMessage(payload2);
|
||||
}, "listener");
|
||||
signal.addEventListener("abort", listener);
|
||||
try {
|
||||
await promise;
|
||||
} finally {
|
||||
signal.removeEventListener("abort", listener);
|
||||
}
|
||||
}
|
||||
};
|
||||
__name(WorkerContextFetchingStrategy, "WorkerContextFetchingStrategy");
|
||||
|
||||
// src/ws/WebSocketShard.ts
|
||||
import { Buffer as Buffer2 } from "node:buffer";
|
||||
import { once as once2 } from "node:events";
|
||||
import { clearInterval, clearTimeout, setInterval, setTimeout } from "node:timers";
|
||||
import { setTimeout as sleep2 } from "node:timers/promises";
|
||||
import { URLSearchParams } from "node:url";
|
||||
import { TextDecoder } from "node:util";
|
||||
import { inflate } from "node:zlib";
|
||||
import { Collection as Collection6 } from "@discordjs/collection";
|
||||
import { lazy as lazy2 } from "@discordjs/util";
|
||||
import { AsyncQueue as AsyncQueue2 } from "@sapphire/async-queue";
|
||||
import { AsyncEventEmitter } from "@vladfrangu/async_event_emitter";
|
||||
import {
|
||||
GatewayCloseCodes,
|
||||
GatewayDispatchEvents,
|
||||
GatewayOpcodes as GatewayOpcodes2
|
||||
} from "discord-api-types/v10";
|
||||
import { WebSocket } from "ws";
|
||||
|
||||
// src/utils/constants.ts
|
||||
import process from "node:process";
|
||||
import { Collection as Collection5 } from "@discordjs/collection";
|
||||
import { lazy } from "@discordjs/util";
|
||||
import { APIVersion, GatewayOpcodes } from "discord-api-types/v10";
|
||||
|
||||
// src/strategies/sharding/SimpleShardingStrategy.ts
|
||||
import { Collection as Collection3 } from "@discordjs/collection";
|
||||
|
||||
// src/strategies/context/SimpleContextFetchingStrategy.ts
|
||||
var _SimpleContextFetchingStrategy = class {
|
||||
constructor(manager, options) {
|
||||
this.manager = manager;
|
||||
this.options = options;
|
||||
}
|
||||
static async ensureThrottler(manager) {
|
||||
const throttler = _SimpleContextFetchingStrategy.throttlerCache.get(manager);
|
||||
if (throttler) {
|
||||
return throttler;
|
||||
}
|
||||
const newThrottler = await manager.options.buildIdentifyThrottler(manager);
|
||||
_SimpleContextFetchingStrategy.throttlerCache.set(manager, newThrottler);
|
||||
return newThrottler;
|
||||
}
|
||||
async retrieveSessionInfo(shardId) {
|
||||
return this.manager.options.retrieveSessionInfo(shardId);
|
||||
}
|
||||
updateSessionInfo(shardId, sessionInfo) {
|
||||
return this.manager.options.updateSessionInfo(shardId, sessionInfo);
|
||||
}
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const throttler = await _SimpleContextFetchingStrategy.ensureThrottler(this.manager);
|
||||
await throttler.waitForIdentify(shardId, signal);
|
||||
}
|
||||
};
|
||||
var SimpleContextFetchingStrategy = _SimpleContextFetchingStrategy;
|
||||
__name(SimpleContextFetchingStrategy, "SimpleContextFetchingStrategy");
|
||||
// This strategy assumes every shard is running under the same process - therefore we need a single
|
||||
// IdentifyThrottler per manager.
|
||||
__publicField(SimpleContextFetchingStrategy, "throttlerCache", /* @__PURE__ */ new WeakMap());
|
||||
|
||||
// src/strategies/sharding/SimpleShardingStrategy.ts
|
||||
var SimpleShardingStrategy = class {
|
||||
manager;
|
||||
shards = new Collection3();
|
||||
constructor(manager) {
|
||||
this.manager = manager;
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.spawn}
|
||||
*/
|
||||
async spawn(shardIds) {
|
||||
const strategyOptions = await managerToFetchingStrategyOptions(this.manager);
|
||||
for (const shardId of shardIds) {
|
||||
const strategy = new SimpleContextFetchingStrategy(this.manager, strategyOptions);
|
||||
const shard = new WebSocketShard(strategy, shardId);
|
||||
for (const event of Object.values(WebSocketShardEvents)) {
|
||||
shard.on(event, (payload) => this.manager.emit(event, { ...payload, shardId }));
|
||||
}
|
||||
this.shards.set(shardId, shard);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.connect}
|
||||
*/
|
||||
async connect() {
|
||||
const promises = [];
|
||||
for (const shard of this.shards.values()) {
|
||||
promises.push(shard.connect());
|
||||
}
|
||||
await Promise.all(promises);
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.destroy}
|
||||
*/
|
||||
async destroy(options) {
|
||||
const promises = [];
|
||||
for (const shard of this.shards.values()) {
|
||||
promises.push(shard.destroy(options));
|
||||
}
|
||||
await Promise.all(promises);
|
||||
this.shards.clear();
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.send}
|
||||
*/
|
||||
async send(shardId, payload) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} not found`);
|
||||
}
|
||||
return shard.send(payload);
|
||||
}
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.fetchStatus}
|
||||
*/
|
||||
async fetchStatus() {
|
||||
return this.shards.mapValues((shard) => shard.status);
|
||||
}
|
||||
};
|
||||
__name(SimpleShardingStrategy, "SimpleShardingStrategy");
|
||||
|
||||
// src/throttling/SimpleIdentifyThrottler.ts
|
||||
import { setTimeout as sleep } from "node:timers/promises";
|
||||
import { Collection as Collection4 } from "@discordjs/collection";
|
||||
import { AsyncQueue } from "@sapphire/async-queue";
|
||||
var SimpleIdentifyThrottler = class {
|
||||
constructor(maxConcurrency) {
|
||||
this.maxConcurrency = maxConcurrency;
|
||||
}
|
||||
states = new Collection4();
|
||||
/**
|
||||
* {@inheritDoc IIdentifyThrottler.waitForIdentify}
|
||||
*/
|
||||
async waitForIdentify(shardId, signal) {
|
||||
const key = shardId % this.maxConcurrency;
|
||||
const state = this.states.ensure(key, () => {
|
||||
return {
|
||||
queue: new AsyncQueue(),
|
||||
resetsAt: Number.POSITIVE_INFINITY
|
||||
};
|
||||
});
|
||||
await state.queue.wait({ signal });
|
||||
try {
|
||||
const diff = state.resetsAt - Date.now();
|
||||
if (diff <= 5e3) {
|
||||
const time = diff + Math.random() * 1500;
|
||||
await sleep(time);
|
||||
}
|
||||
state.resetsAt = Date.now() + 5e3;
|
||||
} finally {
|
||||
state.queue.shift();
|
||||
}
|
||||
}
|
||||
};
|
||||
__name(SimpleIdentifyThrottler, "SimpleIdentifyThrottler");
|
||||
|
||||
// src/utils/constants.ts
|
||||
var DefaultDeviceProperty = `@discordjs/ws 0.8.3`;
|
||||
var getDefaultSessionStore = lazy(() => new Collection5());
|
||||
var DefaultWebSocketManagerOptions = {
|
||||
async buildIdentifyThrottler(manager) {
|
||||
const info = await manager.fetchGatewayInformation();
|
||||
return new SimpleIdentifyThrottler(info.session_start_limit.max_concurrency);
|
||||
},
|
||||
buildStrategy: (manager) => new SimpleShardingStrategy(manager),
|
||||
shardCount: null,
|
||||
shardIds: null,
|
||||
largeThreshold: null,
|
||||
initialPresence: null,
|
||||
identifyProperties: {
|
||||
browser: DefaultDeviceProperty,
|
||||
device: DefaultDeviceProperty,
|
||||
os: process.platform
|
||||
},
|
||||
version: APIVersion,
|
||||
encoding: "json" /* JSON */,
|
||||
compression: null,
|
||||
retrieveSessionInfo(shardId) {
|
||||
const store = getDefaultSessionStore();
|
||||
return store.get(shardId) ?? null;
|
||||
},
|
||||
updateSessionInfo(shardId, info) {
|
||||
const store = getDefaultSessionStore();
|
||||
if (info) {
|
||||
store.set(shardId, info);
|
||||
} else {
|
||||
store.delete(shardId);
|
||||
}
|
||||
},
|
||||
handshakeTimeout: 3e4,
|
||||
helloTimeout: 6e4,
|
||||
readyTimeout: 15e3
|
||||
};
|
||||
var ImportantGatewayOpcodes = /* @__PURE__ */ new Set([
|
||||
GatewayOpcodes.Heartbeat,
|
||||
GatewayOpcodes.Identify,
|
||||
GatewayOpcodes.Resume
|
||||
]);
|
||||
function getInitialSendRateLimitState() {
|
||||
return {
|
||||
remaining: 120,
|
||||
resetAt: Date.now() + 6e4
|
||||
};
|
||||
}
|
||||
__name(getInitialSendRateLimitState, "getInitialSendRateLimitState");
|
||||
|
||||
// src/ws/WebSocketShard.ts
|
||||
var getZlibSync = lazy2(async () => import("zlib-sync").then((mod) => mod.default).catch(() => null));
|
||||
var WebSocketShardEvents = /* @__PURE__ */ ((WebSocketShardEvents2) => {
|
||||
WebSocketShardEvents2["Closed"] = "closed";
|
||||
WebSocketShardEvents2["Debug"] = "debug";
|
||||
WebSocketShardEvents2["Dispatch"] = "dispatch";
|
||||
WebSocketShardEvents2["Error"] = "error";
|
||||
WebSocketShardEvents2["HeartbeatComplete"] = "heartbeat";
|
||||
WebSocketShardEvents2["Hello"] = "hello";
|
||||
WebSocketShardEvents2["Ready"] = "ready";
|
||||
WebSocketShardEvents2["Resumed"] = "resumed";
|
||||
return WebSocketShardEvents2;
|
||||
})(WebSocketShardEvents || {});
|
||||
var WebSocketShardDestroyRecovery = /* @__PURE__ */ ((WebSocketShardDestroyRecovery2) => {
|
||||
WebSocketShardDestroyRecovery2[WebSocketShardDestroyRecovery2["Reconnect"] = 0] = "Reconnect";
|
||||
WebSocketShardDestroyRecovery2[WebSocketShardDestroyRecovery2["Resume"] = 1] = "Resume";
|
||||
return WebSocketShardDestroyRecovery2;
|
||||
})(WebSocketShardDestroyRecovery || {});
|
||||
var WebSocketShard = class extends AsyncEventEmitter {
|
||||
connection = null;
|
||||
useIdentifyCompress = false;
|
||||
inflate = null;
|
||||
textDecoder = new TextDecoder();
|
||||
replayedEvents = 0;
|
||||
isAck = true;
|
||||
sendRateLimitState = getInitialSendRateLimitState();
|
||||
initialHeartbeatTimeoutController = null;
|
||||
heartbeatInterval = null;
|
||||
lastHeartbeatAt = -1;
|
||||
// Indicates whether the shard has already resolved its original connect() call
|
||||
initialConnectResolved = false;
|
||||
// Indicates if we failed to connect to the ws url (ECONNREFUSED/ECONNRESET)
|
||||
failedToConnectDueToNetworkError = false;
|
||||
sendQueue = new AsyncQueue2();
|
||||
timeoutAbortControllers = new Collection6();
|
||||
strategy;
|
||||
id;
|
||||
#status = 0 /* Idle */;
|
||||
get status() {
|
||||
return this.#status;
|
||||
}
|
||||
constructor(strategy, id) {
|
||||
super();
|
||||
this.strategy = strategy;
|
||||
this.id = id;
|
||||
}
|
||||
async connect() {
|
||||
const promise = this.initialConnectResolved ? Promise.resolve() : once2(this, "ready" /* Ready */);
|
||||
void this.internalConnect();
|
||||
await promise;
|
||||
this.initialConnectResolved = true;
|
||||
}
|
||||
async internalConnect() {
|
||||
if (this.#status !== 0 /* Idle */) {
|
||||
throw new Error("Tried to connect a shard that wasn't idle");
|
||||
}
|
||||
const { version, encoding, compression } = this.strategy.options;
|
||||
const params = new URLSearchParams({ v: version, encoding });
|
||||
if (compression) {
|
||||
const zlib = await getZlibSync();
|
||||
if (zlib) {
|
||||
params.append("compress", compression);
|
||||
this.inflate = new zlib.Inflate({
|
||||
chunkSize: 65535,
|
||||
to: "string"
|
||||
});
|
||||
} else if (!this.useIdentifyCompress) {
|
||||
this.useIdentifyCompress = true;
|
||||
console.warn(
|
||||
"WebSocketShard: Compression is enabled but zlib-sync is not installed, falling back to identify compress"
|
||||
);
|
||||
}
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
const url = `${session?.resumeURL ?? this.strategy.options.gatewayInformation.url}?${params.toString()}`;
|
||||
this.debug([`Connecting to ${url}`]);
|
||||
const connection = new WebSocket(url, { handshakeTimeout: this.strategy.options.handshakeTimeout ?? void 0 }).on("message", this.onMessage.bind(this)).on("error", this.onError.bind(this)).on("close", this.onClose.bind(this));
|
||||
connection.binaryType = "arraybuffer";
|
||||
this.connection = connection;
|
||||
this.#status = 1 /* Connecting */;
|
||||
this.sendRateLimitState = getInitialSendRateLimitState();
|
||||
const { ok } = await this.waitForEvent("hello" /* Hello */, this.strategy.options.helloTimeout);
|
||||
if (!ok) {
|
||||
return;
|
||||
}
|
||||
if (session?.shardCount === this.strategy.options.shardCount) {
|
||||
await this.resume(session);
|
||||
} else {
|
||||
await this.identify();
|
||||
}
|
||||
}
|
||||
async destroy(options = {}) {
|
||||
if (this.#status === 0 /* Idle */) {
|
||||
this.debug(["Tried to destroy a shard that was idle"]);
|
||||
return;
|
||||
}
|
||||
if (!options.code) {
|
||||
options.code = options.recover === 1 /* Resume */ ? 4200 /* Resuming */ : 1e3 /* Normal */;
|
||||
}
|
||||
this.debug([
|
||||
"Destroying shard",
|
||||
`Reason: ${options.reason ?? "none"}`,
|
||||
`Code: ${options.code}`,
|
||||
`Recover: ${options.recover === void 0 ? "none" : WebSocketShardDestroyRecovery[options.recover]}`
|
||||
]);
|
||||
this.isAck = true;
|
||||
if (this.heartbeatInterval) {
|
||||
clearInterval(this.heartbeatInterval);
|
||||
}
|
||||
if (this.initialHeartbeatTimeoutController) {
|
||||
this.initialHeartbeatTimeoutController.abort();
|
||||
this.initialHeartbeatTimeoutController = null;
|
||||
}
|
||||
this.lastHeartbeatAt = -1;
|
||||
for (const controller of this.timeoutAbortControllers.values()) {
|
||||
controller.abort();
|
||||
}
|
||||
this.timeoutAbortControllers.clear();
|
||||
this.failedToConnectDueToNetworkError = false;
|
||||
if (options.recover !== 1 /* Resume */) {
|
||||
await this.strategy.updateSessionInfo(this.id, null);
|
||||
}
|
||||
if (this.connection) {
|
||||
this.connection.removeAllListeners("message");
|
||||
this.connection.removeAllListeners("close");
|
||||
const shouldClose = this.connection.readyState === WebSocket.OPEN;
|
||||
this.debug([
|
||||
"Connection status during destroy",
|
||||
`Needs closing: ${shouldClose}`,
|
||||
`Ready state: ${this.connection.readyState}`
|
||||
]);
|
||||
if (shouldClose) {
|
||||
this.connection.close(options.code, options.reason);
|
||||
await once2(this.connection, "close");
|
||||
this.emit("closed" /* Closed */, { code: options.code });
|
||||
}
|
||||
this.connection.removeAllListeners("error");
|
||||
} else {
|
||||
this.debug(["Destroying a shard that has no connection; please open an issue on GitHub"]);
|
||||
}
|
||||
this.#status = 0 /* Idle */;
|
||||
if (options.recover !== void 0) {
|
||||
await sleep2(500);
|
||||
return this.internalConnect();
|
||||
}
|
||||
}
|
||||
async waitForEvent(event, timeoutDuration) {
|
||||
this.debug([`Waiting for event ${event} ${timeoutDuration ? `for ${timeoutDuration}ms` : "indefinitely"}`]);
|
||||
const timeoutController = new AbortController();
|
||||
const timeout = timeoutDuration ? setTimeout(() => timeoutController.abort(), timeoutDuration).unref() : null;
|
||||
this.timeoutAbortControllers.set(event, timeoutController);
|
||||
const closeController = new AbortController();
|
||||
try {
|
||||
const closed = await Promise.race([
|
||||
once2(this, event, { signal: timeoutController.signal }).then(() => false),
|
||||
once2(this, "closed" /* Closed */, { signal: closeController.signal }).then(() => true)
|
||||
]);
|
||||
return { ok: !closed };
|
||||
} catch {
|
||||
void this.destroy({
|
||||
code: 1e3 /* Normal */,
|
||||
reason: "Something timed out or went wrong while waiting for an event",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
return { ok: false };
|
||||
} finally {
|
||||
if (timeout) {
|
||||
clearTimeout(timeout);
|
||||
}
|
||||
this.timeoutAbortControllers.delete(event);
|
||||
if (!closeController.signal.aborted) {
|
||||
closeController.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
async send(payload) {
|
||||
if (!this.connection) {
|
||||
throw new Error("WebSocketShard wasn't connected");
|
||||
}
|
||||
if (this.#status !== 3 /* Ready */ && !ImportantGatewayOpcodes.has(payload.op)) {
|
||||
this.debug(["Tried to send a non-crucial payload before the shard was ready, waiting"]);
|
||||
try {
|
||||
await once2(this, "ready" /* Ready */);
|
||||
} catch {
|
||||
return this.send(payload);
|
||||
}
|
||||
}
|
||||
await this.sendQueue.wait();
|
||||
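// Gateway connections are limited to 120 outgoing commands per 60 seconds (per Discord's
// gateway documentation). When the local budget below is exhausted before the window resets,
// the shard sleeps until resetAt, unless the connection closes first.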
if (--this.sendRateLimitState.remaining <= 0) {
|
||||
const now = Date.now();
|
||||
if (this.sendRateLimitState.resetAt > now) {
|
||||
const sleepFor = this.sendRateLimitState.resetAt - now;
|
||||
this.debug([`Was about to hit the send rate limit, sleeping for ${sleepFor}ms`]);
|
||||
const controller = new AbortController();
|
||||
const interrupted = await Promise.race([
|
||||
sleep2(sleepFor).then(() => false),
|
||||
once2(this, "closed" /* Closed */, { signal: controller.signal }).then(() => true)
|
||||
]);
|
||||
if (interrupted) {
|
||||
this.debug(["Connection closed while waiting for the send rate limit to reset, re-queueing payload"]);
|
||||
this.sendQueue.shift();
|
||||
return this.send(payload);
|
||||
}
|
||||
controller.abort();
|
||||
}
|
||||
this.sendRateLimitState = getInitialSendRateLimitState();
|
||||
}
|
||||
this.sendQueue.shift();
|
||||
this.connection.send(JSON.stringify(payload));
|
||||
}
|
||||
async identify() {
|
||||
this.debug(["Waiting for identify throttle"]);
|
||||
const controller = new AbortController();
|
||||
const closeHandler = /* @__PURE__ */ __name(() => {
|
||||
controller.abort();
|
||||
}, "closeHandler");
|
||||
this.on("closed" /* Closed */, closeHandler);
|
||||
try {
|
||||
await this.strategy.waitForIdentify(this.id, controller.signal);
|
||||
} catch {
|
||||
this.debug(["Was waiting for an identify, but the shard closed in the meantime"]);
|
||||
return;
|
||||
} finally {
|
||||
this.off("closed" /* Closed */, closeHandler);
|
||||
}
|
||||
this.debug([
|
||||
"Identifying",
|
||||
`shard id: ${this.id.toString()}`,
|
||||
`shard count: ${this.strategy.options.shardCount}`,
|
||||
`intents: ${this.strategy.options.intents}`,
|
||||
`compression: ${this.inflate ? "zlib-stream" : this.useIdentifyCompress ? "identify" : "none"}`
|
||||
]);
|
||||
const d = {
|
||||
token: this.strategy.options.token,
|
||||
properties: this.strategy.options.identifyProperties,
|
||||
intents: this.strategy.options.intents,
|
||||
compress: this.useIdentifyCompress,
|
||||
shard: [this.id, this.strategy.options.shardCount]
|
||||
};
|
||||
if (this.strategy.options.largeThreshold) {
|
||||
d.large_threshold = this.strategy.options.largeThreshold;
|
||||
}
|
||||
if (this.strategy.options.initialPresence) {
|
||||
d.presence = this.strategy.options.initialPresence;
|
||||
}
|
||||
await this.send({
|
||||
op: GatewayOpcodes2.Identify,
|
||||
d
|
||||
});
|
||||
await this.waitForEvent("ready" /* Ready */, this.strategy.options.readyTimeout);
|
||||
}
|
||||
async resume(session) {
|
||||
this.debug([
|
||||
"Resuming session",
|
||||
`resume url: ${session.resumeURL}`,
|
||||
`sequence: ${session.sequence}`,
|
||||
`shard id: ${this.id.toString()}`
|
||||
]);
|
||||
this.#status = 2 /* Resuming */;
|
||||
this.replayedEvents = 0;
|
||||
return this.send({
|
||||
op: GatewayOpcodes2.Resume,
|
||||
d: {
|
||||
token: this.strategy.options.token,
|
||||
seq: session.sequence,
|
||||
session_id: session.sessionId
|
||||
}
|
||||
});
|
||||
}
|
||||
async heartbeat(requested = false) {
|
||||
if (!this.isAck && !requested) {
|
||||
return this.destroy({ reason: "Zombie connection", recover: 1 /* Resume */ });
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
await this.send({
|
||||
op: GatewayOpcodes2.Heartbeat,
|
||||
d: session?.sequence ?? null
|
||||
});
|
||||
this.lastHeartbeatAt = Date.now();
|
||||
this.isAck = false;
|
||||
}
|
||||
async unpackMessage(data, isBinary) {
|
||||
const decompressable = new Uint8Array(data);
|
||||
if (!isBinary) {
|
||||
return JSON.parse(this.textDecoder.decode(decompressable));
|
||||
}
|
||||
if (this.useIdentifyCompress) {
|
||||
return new Promise((resolve2, reject) => {
|
||||
inflate(decompressable, { chunkSize: 65535 }, (err, result) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
resolve2(JSON.parse(this.textDecoder.decode(result)));
|
||||
});
|
||||
});
|
||||
}
|
||||
if (this.inflate) {
|
||||
const l = decompressable.length;
|
||||
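// Discord's zlib-stream transport terminates each complete message with the Z_SYNC_FLUSH
// suffix 0x00 0x00 0xFF 0xFF; only when that suffix is present is the buffered data
// flushed and parsed below.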
const flush = l >= 4 && decompressable[l - 4] === 0 && decompressable[l - 3] === 0 && decompressable[l - 2] === 255 && decompressable[l - 1] === 255;
|
||||
const zlib = await getZlibSync();
|
||||
this.inflate.push(Buffer2.from(decompressable), flush ? zlib.Z_SYNC_FLUSH : zlib.Z_NO_FLUSH);
|
||||
if (this.inflate.err) {
|
||||
this.emit("error" /* Error */, {
|
||||
error: new Error(`${this.inflate.err}${this.inflate.msg ? `: ${this.inflate.msg}` : ""}`)
|
||||
});
|
||||
}
|
||||
if (!flush) {
|
||||
return null;
|
||||
}
|
||||
const { result } = this.inflate;
|
||||
if (!result) {
|
||||
return null;
|
||||
}
|
||||
return JSON.parse(typeof result === "string" ? result : this.textDecoder.decode(result));
|
||||
}
|
||||
this.debug([
|
||||
"Received a message we were unable to decompress",
|
||||
`isBinary: ${isBinary.toString()}`,
|
||||
`useIdentifyCompress: ${this.useIdentifyCompress.toString()}`,
|
||||
`inflate: ${Boolean(this.inflate).toString()}`
|
||||
]);
|
||||
return null;
|
||||
}
|
||||
async onMessage(data, isBinary) {
|
||||
const payload = await this.unpackMessage(data, isBinary);
|
||||
if (!payload) {
|
||||
return;
|
||||
}
|
||||
switch (payload.op) {
|
||||
case GatewayOpcodes2.Dispatch: {
|
||||
if (this.#status === 2 /* Resuming */) {
|
||||
this.replayedEvents++;
|
||||
}
|
||||
switch (payload.t) {
|
||||
case GatewayDispatchEvents.Ready: {
|
||||
this.#status = 3 /* Ready */;
|
||||
const session2 = {
|
||||
sequence: payload.s,
|
||||
sessionId: payload.d.session_id,
|
||||
shardId: this.id,
|
||||
shardCount: this.strategy.options.shardCount,
|
||||
resumeURL: payload.d.resume_gateway_url
|
||||
};
|
||||
await this.strategy.updateSessionInfo(this.id, session2);
|
||||
this.emit("ready" /* Ready */, { data: payload.d });
|
||||
break;
|
||||
}
|
||||
case GatewayDispatchEvents.Resumed: {
|
||||
this.#status = 3 /* Ready */;
|
||||
this.debug([`Resumed and replayed ${this.replayedEvents} events`]);
|
||||
this.emit("resumed" /* Resumed */);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
if (session) {
|
||||
if (payload.s > session.sequence) {
|
||||
await this.strategy.updateSessionInfo(this.id, { ...session, sequence: payload.s });
|
||||
}
|
||||
} else {
|
||||
this.debug([
|
||||
`Received a ${payload.t} event but no session is available. Session information cannot be re-constructed in this state without a full reconnect`
|
||||
]);
|
||||
}
|
||||
this.emit("dispatch" /* Dispatch */, { data: payload });
|
||||
break;
|
||||
}
|
||||
case GatewayOpcodes2.Heartbeat: {
|
||||
await this.heartbeat(true);
|
||||
break;
|
||||
}
|
||||
case GatewayOpcodes2.Reconnect: {
|
||||
await this.destroy({
|
||||
reason: "Told to reconnect by Discord",
|
||||
recover: 1 /* Resume */
|
||||
});
|
||||
break;
|
||||
}
|
||||
case GatewayOpcodes2.InvalidSession: {
|
||||
this.debug([`Invalid session; will attempt to resume: ${payload.d.toString()}`]);
|
||||
const session = await this.strategy.retrieveSessionInfo(this.id);
|
||||
if (payload.d && session) {
|
||||
await this.resume(session);
|
||||
} else {
|
||||
await this.destroy({
|
||||
reason: "Invalid session",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
case GatewayOpcodes2.Hello: {
|
||||
this.emit("hello" /* Hello */);
|
||||
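// Per the gateway docs, the first heartbeat is sent after heartbeat_interval * jitter,
// where jitter is a random value between 0 and 1, to spread heartbeats across connections.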
const jitter = Math.random();
|
||||
const firstWait = Math.floor(payload.d.heartbeat_interval * jitter);
|
||||
this.debug([`Preparing first heartbeat of the connection with a jitter of ${jitter}; waiting ${firstWait}ms`]);
|
||||
try {
|
||||
const controller = new AbortController();
|
||||
this.initialHeartbeatTimeoutController = controller;
|
||||
await sleep2(firstWait, void 0, { signal: controller.signal });
|
||||
} catch {
|
||||
this.debug(["Cancelled initial heartbeat due to #destroy being called"]);
|
||||
return;
|
||||
} finally {
|
||||
this.initialHeartbeatTimeoutController = null;
|
||||
}
|
||||
await this.heartbeat();
|
||||
this.debug([`First heartbeat sent, starting to beat every ${payload.d.heartbeat_interval}ms`]);
|
||||
this.heartbeatInterval = setInterval(() => void this.heartbeat(), payload.d.heartbeat_interval);
|
||||
break;
|
||||
}
|
||||
case GatewayOpcodes2.HeartbeatAck: {
|
||||
this.isAck = true;
|
||||
const ackAt = Date.now();
|
||||
this.emit("heartbeat" /* HeartbeatComplete */, {
|
||||
ackAt,
|
||||
heartbeatAt: this.lastHeartbeatAt,
|
||||
latency: ackAt - this.lastHeartbeatAt
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
onError(error) {
|
||||
if ("code" in error && ["ECONNRESET", "ECONNREFUSED"].includes(error.code)) {
|
||||
this.debug(["Failed to connect to the gateway URL specified due to a network error"]);
|
||||
this.failedToConnectDueToNetworkError = true;
|
||||
return;
|
||||
}
|
||||
this.emit("error" /* Error */, { error });
|
||||
}
|
||||
async onClose(code) {
|
||||
this.emit("closed" /* Closed */, { code });
|
||||
switch (code) {
|
||||
case 1e3 /* Normal */: {
|
||||
return this.destroy({
|
||||
code,
|
||||
reason: "Got disconnected by Discord",
|
||||
recover: 0 /* Reconnect */
|
||||
});
|
||||
}
|
||||
case 4200 /* Resuming */: {
|
||||
break;
|
||||
}
|
||||
case GatewayCloseCodes.UnknownError: {
|
||||
this.debug([`An unknown error occurred: ${code}`]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case GatewayCloseCodes.UnknownOpcode: {
|
||||
this.debug(["An invalid opcode was sent to Discord."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case GatewayCloseCodes.DecodeError: {
|
||||
this.debug(["An invalid payload was sent to Discord."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case GatewayCloseCodes.NotAuthenticated: {
|
||||
this.debug(["A request was somehow sent before the identify/resume payload."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case GatewayCloseCodes.AuthenticationFailed: {
|
||||
throw new Error("Authentication failed");
|
||||
}
|
||||
case GatewayCloseCodes.AlreadyAuthenticated: {
|
||||
this.debug(["More than one auth payload was sent."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case GatewayCloseCodes.InvalidSeq: {
|
||||
this.debug(["An invalid sequence was sent."]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case GatewayCloseCodes.RateLimited: {
|
||||
this.debug(["The WebSocket rate limit has been hit, this should never happen"]);
|
||||
return this.destroy({ code, recover: 0 /* Reconnect */ });
|
||||
}
|
||||
case GatewayCloseCodes.SessionTimedOut: {
|
||||
this.debug(["Session timed out."]);
|
||||
return this.destroy({ code, recover: 1 /* Resume */ });
|
||||
}
|
||||
case GatewayCloseCodes.InvalidShard: {
|
||||
throw new Error("Invalid shard");
|
||||
}
|
||||
case GatewayCloseCodes.ShardingRequired: {
|
||||
throw new Error("Sharding is required");
|
||||
}
|
||||
case GatewayCloseCodes.InvalidAPIVersion: {
|
||||
throw new Error("Used an invalid API version");
|
||||
}
|
||||
case GatewayCloseCodes.InvalidIntents: {
|
||||
throw new Error("Used invalid intents");
|
||||
}
|
||||
case GatewayCloseCodes.DisallowedIntents: {
|
||||
throw new Error("Used disallowed intents");
|
||||
}
|
||||
default: {
|
||||
this.debug([
|
||||
`The gateway closed with an unexpected code ${code}, attempting to ${this.failedToConnectDueToNetworkError ? "reconnect" : "resume"}.`
|
||||
]);
|
||||
return this.destroy({
|
||||
code,
|
||||
recover: this.failedToConnectDueToNetworkError ? 0 /* Reconnect */ : 1 /* Resume */
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
debug(messages) {
|
||||
const message = `${messages[0]}${messages.length > 1 ? `
|
||||
${messages.slice(1).map((m) => ` ${m}`).join("\n")}` : ""}`;
|
||||
this.emit("debug" /* Debug */, { message });
|
||||
}
|
||||
};
|
||||
__name(WebSocketShard, "WebSocketShard");
|
||||
|
||||
// src/utils/WorkerBootstrapper.ts
|
||||
var WorkerBootstrapper = class {
|
||||
/**
|
||||
* The data passed to the worker thread
|
||||
*/
|
||||
data = workerData;
|
||||
/**
|
||||
* The shards that are managed by this worker
|
||||
*/
|
||||
shards = new Collection7();
|
||||
constructor() {
|
||||
if (isMainThread2) {
|
||||
throw new Error("Expected WorkerBootstrap to not be used within the main thread");
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Helper method to initiate a shard's connection process
|
||||
*/
|
||||
async connect(shardId) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} does not exist`);
|
||||
}
|
||||
await shard.connect();
|
||||
}
|
||||
/**
|
||||
* Helper method to destroy a shard
|
||||
*/
|
||||
async destroy(shardId, options) {
|
||||
const shard = this.shards.get(shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${shardId} does not exist`);
|
||||
}
|
||||
await shard.destroy(options);
|
||||
}
|
||||
/**
|
||||
* Helper method to attach event listeners to the parentPort
|
||||
*/
|
||||
setupThreadEvents() {
|
||||
parentPort2.on("messageerror", (err) => {
|
||||
throw err;
|
||||
}).on("message", async (payload) => {
|
||||
switch (payload.op) {
|
||||
case 0 /* Connect */: {
|
||||
await this.connect(payload.shardId);
|
||||
const response = {
|
||||
op: 0 /* Connected */,
|
||||
shardId: payload.shardId
|
||||
};
|
||||
parentPort2.postMessage(response);
|
||||
break;
|
||||
}
|
||||
case 1 /* Destroy */: {
|
||||
await this.destroy(payload.shardId, payload.options);
|
||||
const response = {
|
||||
op: 1 /* Destroyed */,
|
||||
shardId: payload.shardId
|
||||
};
|
||||
parentPort2.postMessage(response);
|
||||
break;
|
||||
}
|
||||
case 2 /* Send */: {
|
||||
const shard = this.shards.get(payload.shardId);
|
||||
if (!shard) {
|
||||
throw new RangeError(`Shard ${payload.shardId} does not exist`);
|
||||
}
|
||||
await shard.send(payload.payload);
|
||||
break;
|
||||
}
|
||||
case 3 /* SessionInfoResponse */: {
|
||||
break;
|
||||
}
|
||||
case 4 /* ShardIdentifyResponse */: {
|
||||
break;
|
||||
}
|
||||
case 5 /* FetchStatus */: {
|
||||
const shard = this.shards.get(payload.shardId);
|
||||
if (!shard) {
|
||||
throw new Error(`Shard ${payload.shardId} does not exist`);
|
||||
}
|
||||
const response = {
|
||||
op: 6 /* FetchStatusResponse */,
|
||||
status: shard.status,
|
||||
nonce: payload.nonce
|
||||
};
|
||||
parentPort2.postMessage(response);
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Bootstraps the worker thread with the provided options
|
||||
*/
|
||||
async bootstrap(options = {}) {
|
||||
for (const shardId of this.data.shardIds) {
|
||||
const shard = new WebSocketShard(new WorkerContextFetchingStrategy(this.data), shardId);
|
||||
for (const event of options.forwardEvents ?? Object.values(WebSocketShardEvents)) {
|
||||
shard.on(event, (data) => {
|
||||
const payload = {
|
||||
op: 2 /* Event */,
|
||||
event,
|
||||
data,
|
||||
shardId
|
||||
};
|
||||
parentPort2.postMessage(payload);
|
||||
});
|
||||
}
|
||||
await options.shardCallback?.(shard);
|
||||
this.shards.set(shardId, shard);
|
||||
}
|
||||
this.setupThreadEvents();
|
||||
const message = {
|
||||
op: 7 /* WorkerReady */
|
||||
};
|
||||
parentPort2.postMessage(message);
|
||||
}
|
||||
};
|
||||
__name(WorkerBootstrapper, "WorkerBootstrapper");
|
||||
|
||||
// src/strategies/sharding/defaultWorker.ts
var bootstrapper = new WorkerBootstrapper();
void bootstrapper.bootstrap();
//# sourceMappingURL=defaultWorker.mjs.map
1
node_modules/@discordjs/ws/dist/defaultWorker.mjs.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
667
node_modules/@discordjs/ws/dist/index.d.ts
generated
vendored
Normal file
@ -0,0 +1,667 @@
import * as _discordjs_util from '@discordjs/util';
|
||||
import { Awaitable } from '@discordjs/util';
|
||||
import { GatewayDispatchPayload, GatewayReadyDispatchData, GatewaySendPayload, GatewayOpcodes, GatewayIntentBits, GatewayIdentifyProperties, GatewayPresenceUpdateData, APIGatewayBotInfo } from 'discord-api-types/v10';
|
||||
import * as _discordjs_collection from '@discordjs/collection';
|
||||
import { Collection } from '@discordjs/collection';
|
||||
import { AsyncEventEmitter } from '@vladfrangu/async_event_emitter';
|
||||
import { REST } from '@discordjs/rest';
|
||||
import { AsyncQueue } from '@sapphire/async-queue';
|
||||
|
||||
declare enum WebSocketShardEvents {
|
||||
Closed = "closed",
|
||||
Debug = "debug",
|
||||
Dispatch = "dispatch",
|
||||
Error = "error",
|
||||
HeartbeatComplete = "heartbeat",
|
||||
Hello = "hello",
|
||||
Ready = "ready",
|
||||
Resumed = "resumed"
|
||||
}
|
||||
declare enum WebSocketShardStatus {
|
||||
Idle = 0,
|
||||
Connecting = 1,
|
||||
Resuming = 2,
|
||||
Ready = 3
|
||||
}
|
||||
declare enum WebSocketShardDestroyRecovery {
|
||||
Reconnect = 0,
|
||||
Resume = 1
|
||||
}
|
||||
type WebSocketShardEventsMap = {
|
||||
[WebSocketShardEvents.Closed]: [{
|
||||
code: number;
|
||||
}];
|
||||
[WebSocketShardEvents.Debug]: [payload: {
|
||||
message: string;
|
||||
}];
|
||||
[WebSocketShardEvents.Dispatch]: [payload: {
|
||||
data: GatewayDispatchPayload;
|
||||
}];
|
||||
[WebSocketShardEvents.Error]: [payload: {
|
||||
error: Error;
|
||||
}];
|
||||
[WebSocketShardEvents.Hello]: [];
|
||||
[WebSocketShardEvents.Ready]: [payload: {
|
||||
data: GatewayReadyDispatchData;
|
||||
}];
|
||||
[WebSocketShardEvents.Resumed]: [];
|
||||
[WebSocketShardEvents.HeartbeatComplete]: [payload: {
|
||||
ackAt: number;
|
||||
heartbeatAt: number;
|
||||
latency: number;
|
||||
}];
|
||||
};
|
||||
interface WebSocketShardDestroyOptions {
|
||||
code?: number;
|
||||
reason?: string;
|
||||
recover?: WebSocketShardDestroyRecovery;
|
||||
}
|
||||
declare enum CloseCodes {
|
||||
Normal = 1000,
|
||||
Resuming = 4200
|
||||
}
|
||||
interface SendRateLimitState {
|
||||
remaining: number;
|
||||
resetAt: number;
|
||||
}
|
||||
declare class WebSocketShard extends AsyncEventEmitter<WebSocketShardEventsMap> {
|
||||
#private;
|
||||
private connection;
|
||||
private useIdentifyCompress;
|
||||
private inflate;
|
||||
private readonly textDecoder;
|
||||
private replayedEvents;
|
||||
private isAck;
|
||||
private sendRateLimitState;
|
||||
private initialHeartbeatTimeoutController;
|
||||
private heartbeatInterval;
|
||||
private lastHeartbeatAt;
|
||||
private initialConnectResolved;
|
||||
private failedToConnectDueToNetworkError;
|
||||
private readonly sendQueue;
|
||||
private readonly timeoutAbortControllers;
|
||||
private readonly strategy;
|
||||
readonly id: number;
|
||||
get status(): WebSocketShardStatus;
|
||||
constructor(strategy: IContextFetchingStrategy, id: number);
|
||||
connect(): Promise<void>;
|
||||
private internalConnect;
|
||||
destroy(options?: WebSocketShardDestroyOptions): Promise<void>;
|
||||
private waitForEvent;
|
||||
send(payload: GatewaySendPayload): Promise<void>;
|
||||
private identify;
|
||||
private resume;
|
||||
private heartbeat;
|
||||
private unpackMessage;
|
||||
private onMessage;
|
||||
private onError;
|
||||
private onClose;
|
||||
private debug;
|
||||
}
|
||||
|
||||
/**
|
||||
* Strategies responsible for spawning, initializing connections, destroying shards, and relaying events
|
||||
*/
|
||||
interface IShardingStrategy {
|
||||
/**
|
||||
* Initializes all the shards
|
||||
*/
|
||||
connect(): Awaitable<void>;
|
||||
/**
|
||||
* Destroys all the shards
|
||||
*/
|
||||
destroy(options?: Omit<WebSocketShardDestroyOptions, 'recover'>): Awaitable<void>;
|
||||
/**
|
||||
* Fetches the status of all the shards
|
||||
*/
|
||||
fetchStatus(): Awaitable<Collection<number, WebSocketShardStatus>>;
|
||||
/**
|
||||
* Sends a payload to a shard
|
||||
*/
|
||||
send(shardId: number, payload: GatewaySendPayload): Awaitable<void>;
|
||||
/**
|
||||
* Spawns all the shards
|
||||
*/
|
||||
spawn(shardIds: number[]): Awaitable<void>;
|
||||
}
|
||||
|
||||
/**
|
||||
* IdentifyThrottlers are responsible for dictating when a shard is allowed to identify.
|
||||
*
|
||||
* @see {@link https://discord.com/developers/docs/topics/gateway#sharding-max-concurrency}
|
||||
*/
|
||||
interface IIdentifyThrottler {
|
||||
/**
|
||||
* Resolves once the given shard should be allowed to identify, or rejects if the operation was aborted.
|
||||
*/
|
||||
waitForIdentify(shardId: number, signal: AbortSignal): Promise<void>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple strategy that just spawns shards in the current process
|
||||
*/
|
||||
declare class SimpleShardingStrategy implements IShardingStrategy {
|
||||
private readonly manager;
|
||||
private readonly shards;
|
||||
constructor(manager: WebSocketManager);
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.spawn}
|
||||
*/
|
||||
spawn(shardIds: number[]): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.connect}
|
||||
*/
|
||||
connect(): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.destroy}
|
||||
*/
|
||||
destroy(options?: Omit<WebSocketShardDestroyOptions, 'recover'>): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.send}
|
||||
*/
|
||||
send(shardId: number, payload: GatewaySendPayload): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.fetchStatus}
|
||||
*/
|
||||
fetchStatus(): Promise<Collection<number, WebSocketShardStatus>>;
|
||||
}
|
||||
|
||||
/**
|
||||
* The state of a rate limit key's identify queue.
|
||||
*/
|
||||
interface IdentifyState {
|
||||
queue: AsyncQueue;
|
||||
resetsAt: number;
|
||||
}
|
||||
/**
|
||||
* Local, in-memory identify throttler.
|
||||
*/
|
||||
declare class SimpleIdentifyThrottler implements IIdentifyThrottler {
|
||||
private readonly maxConcurrency;
|
||||
private readonly states;
|
||||
constructor(maxConcurrency: number);
|
||||
/**
|
||||
* {@inheritDoc IIdentifyThrottler.waitForIdentify}
|
||||
*/
|
||||
waitForIdentify(shardId: number, signal: AbortSignal): Promise<void>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Valid encoding types
|
||||
*/
|
||||
declare enum Encoding {
|
||||
JSON = "json"
|
||||
}
|
||||
/**
|
||||
* Valid compression methods
|
||||
*/
|
||||
declare enum CompressionMethod {
|
||||
ZlibStream = "zlib-stream"
|
||||
}
|
||||
declare const DefaultDeviceProperty: `@discordjs/ws ${string}`;
|
||||
/**
|
||||
* Default options used by the manager
|
||||
*/
|
||||
declare const DefaultWebSocketManagerOptions: {
|
||||
readonly buildIdentifyThrottler: (manager: WebSocketManager) => Promise<SimpleIdentifyThrottler>;
|
||||
readonly buildStrategy: (manager: WebSocketManager) => SimpleShardingStrategy;
|
||||
readonly shardCount: null;
|
||||
readonly shardIds: null;
|
||||
readonly largeThreshold: null;
|
||||
readonly initialPresence: null;
|
||||
readonly identifyProperties: {
|
||||
readonly browser: `@discordjs/ws ${string}`;
|
||||
readonly device: `@discordjs/ws ${string}`;
|
||||
readonly os: NodeJS.Platform;
|
||||
};
|
||||
readonly version: "10";
|
||||
readonly encoding: Encoding;
|
||||
readonly compression: null;
|
||||
readonly retrieveSessionInfo: (shardId: number) => SessionInfo | null;
|
||||
readonly updateSessionInfo: (shardId: number, info: SessionInfo | null) => void;
|
||||
readonly handshakeTimeout: 30000;
|
||||
readonly helloTimeout: 60000;
|
||||
readonly readyTimeout: 15000;
|
||||
};
|
||||
declare const ImportantGatewayOpcodes: Set<GatewayOpcodes>;
|
||||
declare function getInitialSendRateLimitState(): SendRateLimitState;
|
||||
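// A sketch of the state this helper presumably returns, assuming Discord's documented
// budget of 120 gateway commands per 60 seconds (the exact constants live in the implementation):
//
//   { remaining: 120, resetAt: Date.now() + 60_000 }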
|
||||
/**
|
||||
* Represents a range of shard ids
|
||||
*/
|
||||
interface ShardRange {
|
||||
end: number;
|
||||
start: number;
|
||||
}
|
||||
/**
|
||||
* Session information for a given shard, used to resume a session
|
||||
*/
|
||||
interface SessionInfo {
|
||||
/**
|
||||
* URL to use when resuming
|
||||
*/
|
||||
resumeURL: string;
|
||||
/**
|
||||
* The sequence number of the last message sent by the shard
|
||||
*/
|
||||
sequence: number;
|
||||
/**
|
||||
* Session id for this shard
|
||||
*/
|
||||
sessionId: string;
|
||||
/**
|
||||
* The total number of shards at the time of this shard identifying
|
||||
*/
|
||||
shardCount: number;
|
||||
/**
|
||||
* The id of the shard
|
||||
*/
|
||||
shardId: number;
|
||||
}
|
||||
/**
|
||||
* Required options for the WebSocketManager
|
||||
*/
|
||||
interface RequiredWebSocketManagerOptions {
|
||||
/**
|
||||
* The intents to request
|
||||
*/
|
||||
intents: GatewayIntentBits | 0;
|
||||
/**
|
||||
* The REST instance to use for fetching gateway information
|
||||
*/
|
||||
rest: REST;
|
||||
/**
|
||||
* The token to use for identifying with the gateway
|
||||
*/
|
||||
token: string;
|
||||
}
|
||||
/**
|
||||
* Optional additional configuration for the WebSocketManager
|
||||
*/
|
||||
interface OptionalWebSocketManagerOptions {
|
||||
/**
|
||||
* Builds an identify throttler to use for this manager's shards
|
||||
*/
|
||||
buildIdentifyThrottler(manager: WebSocketManager): Awaitable<IIdentifyThrottler>;
|
||||
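// Example (sketch): the default presumably builds a SimpleIdentifyThrottler from the
// gateway's max_concurrency; a custom implementation could look similar:
//
//   buildIdentifyThrottler: async (manager) => {
//     const info = await manager.fetchGatewayInformation();
//     return new SimpleIdentifyThrottler(info.session_start_limit.max_concurrency);
//   },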
/**
|
||||
* Builds the strategy to use for sharding
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const manager = new WebSocketManager({
|
||||
* token: process.env.DISCORD_TOKEN,
|
||||
* intents: 0, // for no intents
|
||||
* rest,
|
||||
* buildStrategy: (manager) => new WorkerShardingStrategy(manager, { shardsPerWorker: 2 }),
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
buildStrategy(manager: WebSocketManager): IShardingStrategy;
|
||||
/**
|
||||
* The compression method to use
|
||||
*
|
||||
* @defaultValue `null` (no compression)
|
||||
*/
|
||||
compression: CompressionMethod | null;
|
||||
/**
|
||||
* The encoding to use
|
||||
*
|
||||
* @defaultValue `'json'`
|
||||
*/
|
||||
encoding: Encoding;
|
||||
/**
|
||||
* How long to wait for a shard to connect before giving up
|
||||
*/
|
||||
handshakeTimeout: number | null;
|
||||
/**
|
||||
* How long to wait for a shard's HELLO packet before giving up
|
||||
*/
|
||||
helloTimeout: number | null;
|
||||
/**
|
||||
* Properties to send to the gateway when identifying
|
||||
*/
|
||||
identifyProperties: GatewayIdentifyProperties;
|
||||
/**
|
||||
* Initial presence data to send to the gateway when identifying
|
||||
*/
|
||||
initialPresence: GatewayPresenceUpdateData | null;
|
||||
/**
|
||||
* Value between 50 and 250, total number of members where the gateway will stop sending offline members in the guild member list
|
||||
*/
|
||||
largeThreshold: number | null;
|
||||
/**
|
||||
* How long to wait for a shard's READY packet before giving up
|
||||
*/
|
||||
readyTimeout: number | null;
|
||||
/**
|
||||
* Function used to retrieve session information (and attempt to resume) for a given shard
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const manager = new WebSocketManager({
|
||||
* async retrieveSessionInfo(shardId): Awaitable<SessionInfo | null> {
|
||||
* // Fetch this info from redis or similar
|
||||
* return { sessionId: string, sequence: number };
|
||||
* // Return null if no information is found
|
||||
* },
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
retrieveSessionInfo(shardId: number): Awaitable<SessionInfo | null>;
|
||||
/**
|
||||
* The total number of shards across all WebsocketManagers you intend to instantiate.
|
||||
* Use `null` to use Discord's recommended shard count
|
||||
*/
|
||||
shardCount: number | null;
|
||||
/**
|
||||
* The ids of the shards this WebSocketManager should manage.
|
||||
* Use `null` to simply spawn 0 through `shardCount - 1`
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const manager = new WebSocketManager({
|
||||
* shardIds: [1, 3, 7], // spawns shard 1, 3, and 7, nothing else
|
||||
* });
|
||||
* ```
|
||||
* @example
|
||||
* ```ts
|
||||
* const manager = new WebSocketManager({
|
||||
* shardIds: {
|
||||
* start: 3,
|
||||
* end: 6,
|
||||
* }, // spawns shards 3, 4, 5, and 6
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
shardIds: number[] | ShardRange | null;
|
||||
/**
|
||||
* Function used to store session information for a given shard
|
||||
*/
|
||||
updateSessionInfo(shardId: number, sessionInfo: SessionInfo | null): Awaitable<void>;
|
||||
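// Example (sketch, mirroring the retrieveSessionInfo example above; `redis` is a
// placeholder for whatever store you use):
//
//   async updateSessionInfo(shardId, sessionInfo) {
//     await redis.set(`session:${shardId}`, JSON.stringify(sessionInfo));
//   },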
/**
|
||||
* The gateway version to use
|
||||
*
|
||||
* @defaultValue `'10'`
|
||||
*/
|
||||
version: string;
|
||||
}
|
||||
type WebSocketManagerOptions = OptionalWebSocketManagerOptions & RequiredWebSocketManagerOptions;
|
||||
type ManagerShardEventsMap = {
|
||||
[K in keyof WebSocketShardEventsMap]: [
|
||||
WebSocketShardEventsMap[K] extends [] ? {
|
||||
shardId: number;
|
||||
} : WebSocketShardEventsMap[K][0] & {
|
||||
shardId: number;
|
||||
}
|
||||
];
|
||||
};
|
||||
declare class WebSocketManager extends AsyncEventEmitter<ManagerShardEventsMap> {
|
||||
/**
|
||||
* The options being used by this manager
|
||||
*/
|
||||
readonly options: WebSocketManagerOptions;
|
||||
/**
|
||||
* Internal cache for a GET /gateway/bot result
|
||||
*/
|
||||
private gatewayInformation;
|
||||
/**
|
||||
* Internal cache for the shard ids
|
||||
*/
|
||||
private shardIds;
|
||||
/**
|
||||
* Strategy used to manage shards
|
||||
*
|
||||
* @defaultValue `SimpleShardingStrategy`
|
||||
*/
|
||||
private readonly strategy;
|
||||
constructor(options: Partial<OptionalWebSocketManagerOptions> & RequiredWebSocketManagerOptions);
|
||||
/**
|
||||
* Fetches the gateway information from Discord - or returns it from cache if available
|
||||
*
|
||||
* @param force - Whether to ignore the cache and force a fresh fetch
|
||||
*/
|
||||
fetchGatewayInformation(force?: boolean): Promise<APIGatewayBotInfo>;
|
||||
/**
|
||||
* Updates your total shard count on-the-fly, spawning shards as needed
|
||||
*
|
||||
* @param shardCount - The new shard count to use
|
||||
*/
|
||||
updateShardCount(shardCount: number | null): Promise<this>;
|
||||
/**
|
||||
 * Yields the total number of shards for your bot, accounting for Discord recommendations
|
||||
*/
|
||||
getShardCount(): Promise<number>;
|
||||
/**
|
||||
* Yields the ids of the shards this manager should manage
|
||||
*/
|
||||
getShardIds(force?: boolean): Promise<number[]>;
|
||||
connect(): Promise<void>;
|
||||
destroy(options?: Omit<WebSocketShardDestroyOptions, 'recover'>): Awaitable<void>;
|
||||
send(shardId: number, payload: GatewaySendPayload): Awaitable<void>;
|
||||
fetchStatus(): Awaitable<_discordjs_collection.Collection<number, WebSocketShardStatus>>;
|
||||
}
|
||||
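// Minimal usage sketch (mirrors the package README; the env variable name is a placeholder):
//
//   import { REST } from '@discordjs/rest';
//   import { WebSocketManager, WebSocketShardEvents } from '@discordjs/ws';
//
//   const rest = new REST().setToken(process.env.DISCORD_TOKEN);
//   const manager = new WebSocketManager({ token: process.env.DISCORD_TOKEN, intents: 0, rest });
//
//   manager.on(WebSocketShardEvents.Dispatch, ({ data, shardId }) => {
//     // handle raw gateway dispatches here
//   });
//
//   await manager.connect();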
|
||||
interface FetchingStrategyOptions extends Omit<WebSocketManagerOptions, 'buildIdentifyThrottler' | 'buildStrategy' | 'rest' | 'retrieveSessionInfo' | 'shardCount' | 'shardIds' | 'updateSessionInfo'> {
|
||||
readonly gatewayInformation: APIGatewayBotInfo;
|
||||
readonly shardCount: number;
|
||||
}
|
||||
/**
|
||||
* Strategies responsible solely for making manager information accessible
|
||||
*/
|
||||
interface IContextFetchingStrategy {
|
||||
readonly options: FetchingStrategyOptions;
|
||||
retrieveSessionInfo(shardId: number): Awaitable<SessionInfo | null>;
|
||||
updateSessionInfo(shardId: number, sessionInfo: SessionInfo | null): Awaitable<void>;
|
||||
/**
|
||||
* Resolves once the given shard should be allowed to identify, or rejects if the operation was aborted
|
||||
*/
|
||||
waitForIdentify(shardId: number, signal: AbortSignal): Promise<void>;
|
||||
}
|
||||
declare function managerToFetchingStrategyOptions(manager: WebSocketManager): Promise<FetchingStrategyOptions>;
|
||||
|
||||
declare class SimpleContextFetchingStrategy implements IContextFetchingStrategy {
|
||||
private readonly manager;
|
||||
readonly options: FetchingStrategyOptions;
|
||||
private static throttlerCache;
|
||||
private static ensureThrottler;
|
||||
constructor(manager: WebSocketManager, options: FetchingStrategyOptions);
|
||||
retrieveSessionInfo(shardId: number): Promise<SessionInfo | null>;
|
||||
updateSessionInfo(shardId: number, sessionInfo: SessionInfo | null): _discordjs_util.Awaitable<void>;
|
||||
waitForIdentify(shardId: number, signal: AbortSignal): Promise<void>;
|
||||
}
|
||||
|
||||
declare class WorkerContextFetchingStrategy implements IContextFetchingStrategy {
|
||||
readonly options: FetchingStrategyOptions;
|
||||
private readonly sessionPromises;
|
||||
private readonly waitForIdentifyPromises;
|
||||
constructor(options: FetchingStrategyOptions);
|
||||
retrieveSessionInfo(shardId: number): Promise<SessionInfo | null>;
|
||||
updateSessionInfo(shardId: number, sessionInfo: SessionInfo | null): void;
|
||||
waitForIdentify(shardId: number, signal: AbortSignal): Promise<void>;
|
||||
}
|
||||
|
||||
interface WorkerData extends FetchingStrategyOptions {
|
||||
shardIds: number[];
|
||||
}
|
||||
declare enum WorkerSendPayloadOp {
|
||||
Connect = 0,
|
||||
Destroy = 1,
|
||||
Send = 2,
|
||||
SessionInfoResponse = 3,
|
||||
ShardIdentifyResponse = 4,
|
||||
FetchStatus = 5
|
||||
}
|
||||
type WorkerSendPayload = {
|
||||
nonce: number;
|
||||
ok: boolean;
|
||||
op: WorkerSendPayloadOp.ShardIdentifyResponse;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerSendPayloadOp.FetchStatus;
|
||||
shardId: number;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerSendPayloadOp.SessionInfoResponse;
|
||||
session: SessionInfo | null;
|
||||
} | {
|
||||
op: WorkerSendPayloadOp.Connect;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerSendPayloadOp.Destroy;
|
||||
options?: WebSocketShardDestroyOptions;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerSendPayloadOp.Send;
|
||||
payload: GatewaySendPayload;
|
||||
shardId: number;
|
||||
};
|
||||
declare enum WorkerReceivePayloadOp {
|
||||
Connected = 0,
|
||||
Destroyed = 1,
|
||||
Event = 2,
|
||||
RetrieveSessionInfo = 3,
|
||||
UpdateSessionInfo = 4,
|
||||
WaitForIdentify = 5,
|
||||
FetchStatusResponse = 6,
|
||||
WorkerReady = 7,
|
||||
CancelIdentify = 8
|
||||
}
|
||||
type WorkerReceivePayload = {
|
||||
data: any;
|
||||
event: WebSocketShardEvents;
|
||||
op: WorkerReceivePayloadOp.Event;
|
||||
shardId: number;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerReceivePayloadOp.CancelIdentify;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerReceivePayloadOp.FetchStatusResponse;
|
||||
status: WebSocketShardStatus;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerReceivePayloadOp.RetrieveSessionInfo;
|
||||
shardId: number;
|
||||
} | {
|
||||
nonce: number;
|
||||
op: WorkerReceivePayloadOp.WaitForIdentify;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerReceivePayloadOp.Connected;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerReceivePayloadOp.Destroyed;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerReceivePayloadOp.UpdateSessionInfo;
|
||||
session: SessionInfo | null;
|
||||
shardId: number;
|
||||
} | {
|
||||
op: WorkerReceivePayloadOp.WorkerReady;
|
||||
};
|
||||
/**
|
||||
* Options for a {@link WorkerShardingStrategy}
|
||||
*/
|
||||
interface WorkerShardingStrategyOptions {
|
||||
/**
|
||||
* Dictates how many shards should be spawned per worker thread.
|
||||
*/
|
||||
shardsPerWorker: number | 'all';
|
||||
/**
|
||||
* Path to the worker file to use. The worker requires quite a bit of setup, it is recommended you leverage the {@link WorkerBootstrapper} class.
|
||||
*/
|
||||
workerPath?: string;
|
||||
}
|
||||
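// Example (sketch): two shards per worker with a custom worker file; './my-worker.js'
// is a placeholder path whose contents can follow the WorkerBootstrapper pattern below:
//
//   buildStrategy: (manager) =>
//     new WorkerShardingStrategy(manager, { shardsPerWorker: 2, workerPath: './my-worker.js' }),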
/**
|
||||
* Strategy used to spawn threads in worker_threads
|
||||
*/
|
||||
declare class WorkerShardingStrategy implements IShardingStrategy {
|
||||
#private;
|
||||
private readonly manager;
|
||||
private readonly options;
|
||||
private readonly connectPromises;
|
||||
private readonly destroyPromises;
|
||||
private readonly fetchStatusPromises;
|
||||
private readonly waitForIdentifyControllers;
|
||||
private throttler?;
|
||||
constructor(manager: WebSocketManager, options: WorkerShardingStrategyOptions);
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.spawn}
|
||||
*/
|
||||
spawn(shardIds: number[]): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.connect}
|
||||
*/
|
||||
connect(): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.destroy}
|
||||
*/
|
||||
destroy(options?: Omit<WebSocketShardDestroyOptions, 'recover'>): Promise<void>;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.send}
|
||||
*/
|
||||
send(shardId: number, data: GatewaySendPayload): void;
|
||||
/**
|
||||
* {@inheritDoc IShardingStrategy.fetchStatus}
|
||||
*/
|
||||
fetchStatus(): Promise<Collection<number, WebSocketShardStatus>>;
|
||||
private setupWorker;
|
||||
private resolveWorkerPath;
|
||||
private waitForWorkerReady;
|
||||
private onMessage;
|
||||
private ensureThrottler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Options for bootstrapping the worker
|
||||
*/
|
||||
interface BootstrapOptions {
|
||||
/**
|
||||
* Shard events to just arbitrarily forward to the parent thread for the manager to emit
|
||||
 * Note: By default, this will include ALL events;
 * you most likely want to handle dispatch within the worker itself
|
||||
*/
|
||||
forwardEvents?: WebSocketShardEvents[];
|
||||
/**
|
||||
* Function to call when a shard is created for additional setup
|
||||
*/
|
||||
shardCallback?(shard: WebSocketShard): Awaitable<void>;
|
||||
}
|
||||
/**
|
||||
* Utility class for bootstrapping a worker thread to be used for sharding
|
||||
*/
|
||||
declare class WorkerBootstrapper {
|
||||
/**
|
||||
* The data passed to the worker thread
|
||||
*/
|
||||
protected readonly data: WorkerData;
|
||||
/**
|
||||
* The shards that are managed by this worker
|
||||
*/
|
||||
protected readonly shards: Collection<number, WebSocketShard>;
|
||||
constructor();
|
||||
/**
|
||||
* Helper method to initiate a shard's connection process
|
||||
*/
|
||||
protected connect(shardId: number): Promise<void>;
|
||||
/**
|
||||
* Helper method to destroy a shard
|
||||
*/
|
||||
protected destroy(shardId: number, options?: WebSocketShardDestroyOptions): Promise<void>;
|
||||
/**
|
||||
* Helper method to attach event listeners to the parentPort
|
||||
*/
|
||||
protected setupThreadEvents(): void;
|
||||
/**
|
||||
* Bootstraps the worker thread with the provided options
|
||||
*/
|
||||
bootstrap(options?: Readonly<BootstrapOptions>): Promise<void>;
|
||||
}
|
||||
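// Example (sketch of a custom worker file, following the same pattern as dist/defaultWorker above):
//
//   import { WorkerBootstrapper, WebSocketShardEvents } from '@discordjs/ws';
//
//   const bootstrapper = new WorkerBootstrapper();
//   void bootstrapper.bootstrap({
//     // forward only what the parent thread needs; handle Dispatch inside the worker itself
//     forwardEvents: [WebSocketShardEvents.Ready, WebSocketShardEvents.Closed],
//     shardCallback: (shard) => {
//       // per-shard setup, e.g. attaching extra listeners
//     },
//   });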
|
||||
/**
|
||||
* The {@link https://github.com/discordjs/discord.js/blob/main/packages/ws/#readme | @discordjs/ws} version
|
||||
* that you are currently using.
|
||||
*/
|
||||
declare const version: string;
|
||||
|
||||
export { BootstrapOptions, CloseCodes, CompressionMethod, DefaultDeviceProperty, DefaultWebSocketManagerOptions, Encoding, FetchingStrategyOptions, IContextFetchingStrategy, IIdentifyThrottler, IShardingStrategy, IdentifyState, ImportantGatewayOpcodes, ManagerShardEventsMap, OptionalWebSocketManagerOptions, RequiredWebSocketManagerOptions, SendRateLimitState, SessionInfo, ShardRange, SimpleContextFetchingStrategy, SimpleIdentifyThrottler, SimpleShardingStrategy, WebSocketManager, WebSocketManagerOptions, WebSocketShard, WebSocketShardDestroyOptions, WebSocketShardDestroyRecovery, WebSocketShardEvents, WebSocketShardEventsMap, WebSocketShardStatus, WorkerBootstrapper, WorkerContextFetchingStrategy, WorkerData, WorkerReceivePayload, WorkerReceivePayloadOp, WorkerSendPayload, WorkerSendPayloadOp, WorkerShardingStrategy, WorkerShardingStrategyOptions, getInitialSendRateLimitState, managerToFetchingStrategyOptions, version };
|
1409
node_modules/@discordjs/ws/dist/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
1
node_modules/@discordjs/ws/dist/index.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1373
node_modules/@discordjs/ws/dist/index.mjs
generated
vendored
Normal file
File diff suppressed because it is too large
1
node_modules/@discordjs/ws/dist/index.mjs.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long