// download.js
  1. "use strict";
  2. Object.defineProperty(exports, "__esModule", { value: true });
  3. exports.GridFSBucketReadStream = void 0;
  4. const stream_1 = require("stream");
  5. const abstract_cursor_1 = require("../cursor/abstract_cursor");
  6. const error_1 = require("../error");
  7. const timeout_1 = require("../timeout");
/**
 * A readable stream that enables you to read buffers from GridFS.
 *
 * Do not instantiate this class directly. Use `openDownloadStream()` instead.
 * @public
 */
class GridFSBucketReadStream extends stream_1.Readable {
    /**
     * Fires when the stream loaded the file document corresponding to the provided id.
     * @event
     */
    static { this.FILE = 'file'; }
    /**
     * @param chunks - Handle for chunks collection
     * @param files - Handle for files collection
     * @param readPreference - The read preference to use
     * @param filter - The filter to use to find the file document
     * @internal
     */
    constructor(chunks, files, readPreference, filter, options) {
        // emitClose: true so 'close' is emitted after the stream is destroyed.
        super({ emitClose: true });
        // All mutable stream state lives on `this.s`, shared with the module-level
        // helpers (init / doRead / waitForFile / handleStartOption / handleEndOption).
        this.s = {
            bytesToTrim: 0, // bytes to drop from the tail of the final chunk (set by handleEndOption)
            bytesToSkip: 0, // bytes to drop from the head of the first chunk (set by handleStartOption)
            bytesRead: 0,
            chunks,
            expected: 0, // next expected chunk sequence number `n`
            files,
            filter,
            init: false, // flips to true once the files collection lookup has started
            expectedEnd: 0, // exclusive index of the last chunk to read
            options: {
                start: 0,
                end: 0,
                ...options
            },
            readPreference,
            // Only create a CSOT timeout context when the caller opted in via timeoutMS.
            timeoutContext: options?.timeoutMS != null
                ? new timeout_1.CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 })
                : undefined
        };
    }
    /**
     * Reads from the cursor and pushes to the stream.
     * Private Impl, do not call directly
     * @internal
     */
    _read() {
        if (this.destroyed)
            return;
        // Lazily fetch the file document before streaming any chunk data.
        waitForFile(this, () => doRead(this));
    }
    /**
     * Sets the 0-based offset in bytes to start streaming from. Throws
     * an error if this stream has entered flowing mode
     * (e.g. if you've already called `on('data')`)
     *
     * @param start - 0-based offset in bytes to start streaming from
     */
    start(start = 0) {
        throwIfInitialized(this);
        this.s.options.start = start;
        return this;
    }
    /**
     * Sets the 0-based offset in bytes to stop streaming before. Throws
     * an error if this stream has entered flowing mode
     * (e.g. if you've already called `on('data')`)
     *
     * @param end - Offset in bytes to stop reading at
     */
    end(end = 0) {
        throwIfInitialized(this);
        this.s.options.end = end;
        return this;
    }
    /**
     * Marks this stream as aborted (will never push another `data` event)
     * and kills the underlying cursor. Will emit the 'end' event, and then
     * the 'close' event once the cursor is successfully killed.
     */
    async abort() {
        this.push(null);
        this.destroy();
        // Close the chunk cursor within whatever CSOT budget remains (if any).
        const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow();
        await this.s.cursor?.close({ timeoutMS: remainingTimeMS });
    }
}
exports.GridFSBucketReadStream = GridFSBucketReadStream;
  97. function throwIfInitialized(stream) {
  98. if (stream.s.init) {
  99. throw new error_1.MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
  100. }
  101. }
/**
 * Pulls the next chunk document from the cursor, validates its `n` sequence
 * number and byte length against the file metadata, trims the buffer to the
 * requested [start, end) byte range, and pushes it into the stream.
 * Destroys the stream on any chunk inconsistency or cursor error.
 */
function doRead(stream) {
    if (stream.destroyed)
        return;
    if (!stream.s.cursor)
        return;
    if (!stream.s.file)
        return;
    const handleReadResult = (doc) => {
        if (stream.destroyed)
            return;
        // Cursor exhausted: signal EOF, then close the cursor in the background.
        if (!doc) {
            stream.push(null);
            stream.s.cursor?.close().then(undefined, error => stream.destroy(error));
            return;
        }
        if (!stream.s.file)
            return;
        const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
        const expectedN = stream.s.expected++;
        // Every chunk except the last must be exactly chunkSize bytes long.
        const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
        // Chunks must arrive in strictly increasing `n` order with no gaps.
        if (doc.n > expectedN) {
            return stream.destroy(new error_1.MongoGridFSChunkError(`ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        if (doc.n < expectedN) {
            return stream.destroy(new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        // doc.data may be a raw Buffer or a BSON Binary wrapper; unwrap the latter.
        let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;
        if (buf.byteLength !== expectedLength) {
            if (bytesRemaining <= 0) {
                return stream.destroy(new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`));
            }
            return stream.destroy(new error_1.MongoGridFSChunkError(`ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`));
        }
        stream.s.bytesRead += buf.byteLength;
        if (buf.byteLength === 0) {
            return stream.push(null);
        }
        let sliceStart = null;
        let sliceEnd = null;
        // Honor a `start` offset by skipping bytes at the head of the first chunk;
        // the skip is consumed exactly once.
        if (stream.s.bytesToSkip != null) {
            sliceStart = stream.s.bytesToSkip;
            stream.s.bytesToSkip = 0;
        }
        const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
        // NOTE(review): bytesToSkip was reset to 0 just above, so this evaluates to
        // options.end here — presumably intentional; confirm against driver history.
        const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
        // Honor an `end` offset by trimming the tail of the final chunk.
        if (atEndOfStream && stream.s.bytesToTrim != null) {
            sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
        }
        else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
            sliceEnd = bytesLeftToRead;
        }
        if (sliceStart != null || sliceEnd != null) {
            buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
        }
        stream.push(buf);
        return;
    };
    stream.s.cursor.next().then(handleReadResult, error => {
        if (stream.destroyed)
            return;
        stream.destroy(error);
    });
}
/**
 * Looks up the file document in the files collection, applies the start/end
 * options, and opens a cursor over the file's chunks sorted by `n`.
 * Emits the 'file' event once the file document is available, or destroys
 * the stream on lookup failure, invalid options, or CSOT timeout.
 */
function init(stream) {
    const findOneOptions = {};
    if (stream.s.readPreference) {
        findOneOptions.readPreference = stream.s.readPreference;
    }
    if (stream.s.options && stream.s.options.sort) {
        findOneOptions.sort = stream.s.options.sort;
    }
    if (stream.s.options && stream.s.options.skip) {
        findOneOptions.skip = stream.s.options.skip;
    }
    const handleReadResult = (doc) => {
        if (stream.destroyed)
            return;
        // No matching file document: surface an ENOENT-style runtime error.
        if (!doc) {
            const identifier = stream.s.filter._id
                ? stream.s.filter._id.toString()
                : stream.s.filter.filename;
            const errmsg = `FileNotFound: file ${identifier} was not found`;
            // TODO(NODE-3483)
            const err = new error_1.MongoRuntimeError(errmsg);
            err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
            return stream.destroy(err);
        }
        // If document is empty, kill the stream immediately and don't
        // execute any reads
        if (doc.length <= 0) {
            stream.push(null);
            return;
        }
        if (stream.destroyed) {
            // If user destroys the stream before we have a cursor, wait
            // until the query is done to say we're 'closed' because we can't
            // cancel a query.
            stream.destroy();
            return;
        }
        try {
            stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
        }
        catch (error) {
            return stream.destroy(error);
        }
        const filter = { files_id: doc._id };
        // Currently (MongoDB 3.4.4) skip function does not support the index,
        // it needs to retrieve all the documents first and then skip them. (CS-25811)
        // As work around we use $gte on the "n" field.
        if (stream.s.options && stream.s.options.start != null) {
            const skip = Math.floor(stream.s.options.start / doc.chunkSize);
            if (skip > 0) {
                filter['n'] = { $gte: skip };
            }
        }
        let remainingTimeMS;
        try {
            remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(`Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`);
        }
        catch (error) {
            return stream.destroy(error);
        }
        // Chunks stream back in ascending `n` order; doRead validates the sequence.
        stream.s.cursor = stream.s.chunks
            .find(filter, {
            timeoutMode: stream.s.options.timeoutMS != null ? abstract_cursor_1.CursorTimeoutMode.LIFETIME : undefined,
            timeoutMS: remainingTimeMS
        })
            .sort({ n: 1 });
        if (stream.s.readPreference) {
            stream.s.cursor.withReadPreference(stream.s.readPreference);
        }
        stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
        stream.s.file = doc;
        try {
            stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
        }
        catch (error) {
            return stream.destroy(error);
        }
        // Listeners registered by waitForFile resume reading on this event.
        stream.emit(GridFSBucketReadStream.FILE, doc);
        return;
    };
    let remainingTimeMS;
    try {
        remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(`Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`);
    }
    catch (error) {
        if (!stream.destroyed)
            stream.destroy(error);
        return;
    }
    findOneOptions.timeoutMS = remainingTimeMS;
    stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => {
        if (stream.destroyed)
            return;
        stream.destroy(error);
    });
}
  261. function waitForFile(stream, callback) {
  262. if (stream.s.file) {
  263. return callback();
  264. }
  265. if (!stream.s.init) {
  266. init(stream);
  267. stream.s.init = true;
  268. }
  269. stream.once('file', () => {
  270. callback();
  271. });
  272. }
  273. function handleStartOption(stream, doc, options) {
  274. if (options && options.start != null) {
  275. if (options.start > doc.length) {
  276. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be more than the length of the file (${doc.length})`);
  277. }
  278. if (options.start < 0) {
  279. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
  280. }
  281. if (options.end != null && options.end < options.start) {
  282. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be greater than stream end (${options.end})`);
  283. }
  284. stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
  285. stream.s.expected = Math.floor(options.start / doc.chunkSize);
  286. return options.start - stream.s.bytesRead;
  287. }
  288. throw new error_1.MongoInvalidArgumentError('Start option must be defined');
  289. }
  290. function handleEndOption(stream, doc, cursor, options) {
  291. if (options && options.end != null) {
  292. if (options.end > doc.length) {
  293. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be more than the length of the file (${doc.length})`);
  294. }
  295. if (options.start == null || options.start < 0) {
  296. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
  297. }
  298. const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;
  299. cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
  300. stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
  301. return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
  302. }
  303. throw new error_1.MongoInvalidArgumentError('End option must be defined');
  304. }
//# sourceMappingURL=download.js.map