diff --git a/README.md b/README.md index e9c866f..deb1dac 100644 --- a/README.md +++ b/README.md @@ -53,12 +53,12 @@ const pechkin = require('pechkin'); const { parseFormData } = require('pechkin'); ``` -## [Essential: save to random temp location](./examples/basic-fs-temp.js) +## [Save to file system](./examples/fs.js) **Files are processed sequentially.** ```js -// Full working example: `examples/basic-fs-temp.js` +// Full working example: `examples/fs.js` http.createServer(async (req, res) => { const { fields, files } = await pechkin.parseFormData(req, { @@ -69,17 +69,29 @@ http.createServer(async (req, res) => { const results = []; - for await (const { filename: originalFilename, byteLength, stream, ...file } of files) { + for await (const { filename: originalFilename, stream, ...file } of files) { const newFilename = `${Math.round(Math.random() * 1000)}-${originalFilename}`; const dest = path.join(os.tmpdir(), newFilename); + // Pipe the stream to a file + // The stream will start to be consumed after the current block of code + // finishes executing... stream.pipe(fs.createWriteStream(dest)); - /* - `byteSize` resolves only after the entire `file.stream` has been consumed - You should `await byteSize` only AFTER the code that consumes the stream - (e.g. uploading to AWS S3, loading into memory, etc.) - */ - const length = await byteLength; + + // ...which allows us to set up event handlers for the stream and wrap + // the whole thing in a Promise, so that we can get the stream's length. + const length = await new Promise((resolve, reject) => { + stream + // `stream` is an instance of Transform, which is a Duplex stream, + // which means you can listen to both 'end' (Readable side) + // and 'finish' (Writable side) events. + .on('end', () => resolve(stream.bytesWritten)) + .on('finish', () => resolve(stream.bytesWritten)) + // You can either reject the Promise and handle the Promise rejection + // using .catch() or await + try-catch block, or you can directly + // somehow handle the error in the 'error' event handler. + .on('error', reject); + }) results.push({ ...file, dest, originalFilename, newFilename, length}); } @@ -108,7 +120,7 @@ http.createServer(async (req, res) => { }); ``` -## [Essential: processing files sequentially (get SHA-256 hash)](./examples/sequential.js) +## Processing files sequentially (get SHA-256 hash) In this example, we iterate over all files sequentially, and process them one by one – the next file is accessed and processed only after the previous file is done. Processing here will be calculating a SHA-256 hash from the stream. @@ -125,7 +137,7 @@ Boilerplate code */ const fileHashes = []; -for await (const { stream, field, filename, byteLength, mimeType } of files) { +for await (const { stream, field, filename, mimeType } of files) { // `Hash` class: https://nodejs.org/api/crypto.html#class-hash const hash = createHash('sha256'); @@ -139,24 +151,51 @@ for await (const { stream, field, filename, byteLength, mimeType } of files) { field, filename, mimeType, - // Remember to always `await` the `byteLength` promise AFTER the stream has been consumed! - length: await byteLength, + // Here, we expect the stream to be fully consumed by `for-await-of` loop, + // so there's no need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream – bytesWritten already reached its final value. 
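+      // (Note, not in the original example: `bytesWritten` counts only the bytes
+      // that actually passed through the stream, i.e. exactly what was hashed
+      // above; with `abortOnFileByteLengthLimit: false`, a truncated file
+      // therefore yields the hash and length of its first `maxFileByteLength`
+      // bytes, and `stream.truncated` tells you that this happened.)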
+ + // This is in contrast with the previous file system example, where we + // need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream. + length: stream.bytesWritten, hash: hash.digest('hex'), }); } ``` -## [Advanced: processing files in batches (upload to AWS S3)](./examples/batch-upload-s3.js) - -In this example, we process files in batches of three – the next batch of files is accessed and processed only after the previous batch is done. -Processing here will be uploading the files to AWS S3. +## [Processing files in batches (upload to AWS S3)](./examples/s3.js) ```js -// FULL WORKING EXAMPLE: `examples/batch-upload-s3.js` +// Full working example: `examples/s3.js` -import { createHash } from 'crypto'; +// S3 setup and upload utility function -// ... Boilerplate code ... +const { S3Client } = require("@aws-sdk/client-s3"); +const { Upload } = require('@aws-sdk/lib-storage'); + +const s3Client = new S3Client({ + credentials: { + accessKeyId: "PROVIDE_YOUR_OWN", + secretAccessKey: "PROVIDE_YOUR_OWN", + }, + region: "us-east-1", +}); + +function uploadFileToS3(key, stream) { + const upload = new Upload({ + client: s3Client, + params: { + Key: key, + Bucket: "vrjam-firstbridge", + Body: stream, + } + }); + + return upload.done(); +} + +// ... const { fields, files } = await pechkin.parseFormData(req, { maxTotalFileFieldCount: Infinity, @@ -171,15 +210,23 @@ let i = 0; for await (const { filename: originalFilename, stream, field } of files) { const key = `${i}-${originalFilename}`; - batch.push( - uploadFileToS3(key, stream) // Check implementation in - .then(({ Location }) => ({ field, originalFilename, location: Location })) + + results.push( + await uploadFileToS3(key, stream) + .then(({ Location }) => ({ + field, + originalFilename, + location: Location, + // Here, we expect the stream to be fully consumed by `uploadFileToS3()`, + // so there's no need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream – bytesWritten already reached its final value. + + // This is in contrast with the example in `examples/fs.js`, where we + // need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream. + length: stream.bytesWritten, + })) ); - - if (batch.length === 3) { - results.push(await Promise.all(batch)); - batch = []; // restart batch - } i++; } @@ -242,12 +289,12 @@ app.post( async (req, res) => { const files = []; - for await (const { stream, field, filename, byteLength } of req.files) { + for await (const { stream, field, filename } of req.files) { // Process files however you see fit... // Here, streams are simply skipped stream.resume(); - files.push({ field, filename, length: await byteLength }); + files.push({ field, filename }); } return res.json({ fields: req.body, files }); @@ -404,7 +451,7 @@ type Files = { > > The `file.stream` should always be consumed, otherwise the request parsing will hang, and you might never get access to the next file. If you don't care about a particular file, you can simply do `file.stream.resume()`, but the stream should **always** be consumed. -### (Internal) error handling +### (Internal) Error handling inside `Pechkin::FileIterator`` This section is for those who want to know how errors are handled internally. This is not necessary to use `pechkin`. @@ -424,8 +471,7 @@ This section is for those who want to know how errors are handled internally. 
Th
   encoding: string;
   mimeType: string;
   field: string;
-  byteLength: Promise<FileByteLengthInfo>;
-  stream: stream.Readable;
+  stream: ByteLengthTruncateStream; // See below: "Type: ByteLengthTruncateStream"
 }
 ```
@@ -433,29 +479,27 @@ This section is for those who want to know how errors are handled internally. Th
 - `encoding`: The encoding of the file. [List of encodings](https://nodejs.org/api/buffer.html#buffers-and-character-encodings) supported by Node.js.
 - `mimeType`: The MIME type of the file. If the MIME type is crucial for your application, you should not trust the client-provided `mimeType` value – the client can easily lie about it (e.g. send an `.exe` file with `mimeType: "image/png"`). Instead, you should use a library like [`file-type`](https://github.com/sindresorhus/file-type).
 - `field`: The name of the field the file was sent in.
-- `byteLength`:
-  - If `maxFileByteLength` **is exceeded**:
-    - If `abortOnFileByteLengthLimit === true`: A `Promise` that **rejects** with a `FieldLimitError` error of type `maxFileByteLength`.
-    - If `abortOnFileByteLengthLimit === false`: A `Promise` that **resolves** with an object of type `FileByteLengthInfo`:
-      ```ts
-      {
-        truncated: true;
-        readBytes: number; // always equal to `maxFileByteLength` for the field
-      }
-      ```
-      The file stream will be truncated to `readBytes` bytes, so `readBytes` in this situation always equals the `maxFileByteLength` limit for the field.
-  - If `maxFileByteLength` is **not** exceeded:
-    ```ts
-    {
-      truncated: false;
-      readBytes: number;
-    }
-    ```
-    Where readBytes equals to the actual number of bytes read from the stream.
-
-  > 📝 Note: `byteLength` is encoding-agnostic.
 - `stream`: The file `Readable` stream. The stream should **always** be consumed, otherwise the request parsing will hang, and you might never get access to the next file. If you don't care about a particular file, you can simply do `file.stream.resume()`, but the stream should **always** be consumed.
-  > ❗️ **Very important note on `stream`:**
-  >
-  > The `file.stream` should always be consumed, otherwise the request parsing will hang, and you might never get access to the next file. If you don't care about a particular file, you can simply do `file.stream.resume()`, but the stream should **always** be consumed.
+## Type: `ByteLengthTruncateStream`
+
+A [`Transform`](https://nodejs.org/api/stream.html#stream_class_stream_transform) stream, which does the following to source streams piped into it:
+- Does nothing, i.e. acts as a `PassThrough` stream, as long as the source stream stays below the `maxFileByteLength` limit.
+- As soon as the source stream reaches the `maxFileByteLength` limit:
+  - Sets the `truncated` property to `true`
+  - Emits an `error` (a `FieldLimitError` of type `maxFileByteLength`) if `abortOnFileByteLengthLimit = true`
+  - Truncates the file if `abortOnFileByteLengthLimit = false`
+
+```ts
+Transform & {
+  bytesRead: number;
+  bytesWritten: number;
+  truncated: boolean;
+}
+```
+
+- `bytesRead`: The number of bytes read from the source stream.
+- `bytesWritten`: The number of bytes written to the destination stream.
+- `truncated`: Whether the file was truncated. Truncation only happens with `abortOnFileByteLengthLimit = false`. `bytesRead - bytesWritten` is the number of truncated bytes; it is greater than `0` only if `truncated = true`, and `0` if `truncated = false`.
+
+All of the above properties are updated in real time, as the stream is consumed. This means that you have to wait until the stream is fully consumed (i.e. `'finish'`/`'end'` events are emitted, after e.g.
an upload to file system or S3) to get the final values of `bytesRead`, `bytesWritten` and `truncated`. diff --git a/examples/express.js b/examples/express.js index acc9230..007bc9f 100644 --- a/examples/express.js +++ b/examples/express.js @@ -26,12 +26,12 @@ app.post( async (req, res) => { const files = []; - for await (const { stream, field, filename, byteLength } of req.files) { + for await (const { stream, field, filename } of req.files) { // Process files however you see fit... // Here, streams are simply skipped stream.resume(); - files.push({ field, filename, length: await byteLength }); + files.push({ field, filename }); } return res.json({ fields: req.body, files }); diff --git a/examples/basic-fs-temp.js b/examples/fs.js similarity index 56% rename from examples/basic-fs-temp.js rename to examples/fs.js index 3afca52..dfb77fd 100644 --- a/examples/basic-fs-temp.js +++ b/examples/fs.js @@ -3,8 +3,11 @@ const http = require('http'); const os = require('os'); const path = require('path'); -// If 'Pechkin' is installed, you can simply "require('pechkin')" -// or import * as pechkin from 'pechkin'; +// If 'pechkin' is installed as an NPM package, +// you can simply `const pechkin = require('pechkin')` +// or `import * as pechkin from 'pechkin';` + +// Use the dist/esm distribution if you're using ESM modules (import) const pechkin = require('../dist/cjs'); http @@ -18,17 +21,29 @@ http const results = []; - for await (const { filename: originalFilename, byteLength, stream, ...file } of files) { + for await (const { filename: originalFilename, stream, ...file } of files) { const newFilename = `${Math.round(Math.random() * 1000)}-${originalFilename}`; const dest = path.join(os.tmpdir(), newFilename); + // Pipe the stream to a file + // The stream will start to be consumed after the current block of code + // finishes executing... stream.pipe(fs.createWriteStream(dest)); - /* - `byteSize` resolves only after the entire `file.stream` has been consumed - You should `await byteSize` only AFTER the code that consumes the stream - (e.g. uploading to AWS S3, loading into memory, etc.) - */ - const length = await byteLength; + + // ...which allows us to set up event handlers for the stream and wrap + // the whole thing in a Promise, so that we can get the stream's length. + const length = await new Promise((resolve, reject) => { + stream + // `stream` is an instance of Transform, which is a Duplex stream, + // which means you can listen to both 'end' (Readable side) + // and 'finish' (Writable side) events. + .on('end', () => resolve(stream.bytesWritten)) + .on('finish', () => resolve(stream.bytesWritten)) + // You can either reject the Promise and handle the Promise rejection + // using .catch() or await + try-catch block, or you can directly + // somehow handle the error in the 'error' event handler. 
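+      // (Alternative sketch, not part of this example: on Node 15+ you can let
+      // `stream/promises` do the piping and the waiting in one call, instead of
+      // the manual Promise + event handlers used here:
+      //
+      //   const { pipeline } = require('stream/promises');
+      //   await pipeline(stream, fs.createWriteStream(dest));
+      //   const length = stream.bytesWritten;
+      //
+      // `pipeline()` resolves once both streams have finished and rejects on
+      // error, so `bytesWritten` is already final when the `await` returns.)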
+ .on('error', reject); + }) results.push({ ...file, dest, originalFilename, newFilename, length}); } diff --git a/examples/batch-upload-s3.js b/examples/s3.js similarity index 65% rename from examples/batch-upload-s3.js rename to examples/s3.js index 588c567..81a472e 100644 --- a/examples/batch-upload-s3.js +++ b/examples/s3.js @@ -3,8 +3,11 @@ const http = require("http"); const { S3Client } = require("@aws-sdk/client-s3"); const { Upload } = require('@aws-sdk/lib-storage'); -// If 'Pechkin' is installed, you can simply "require('pechkin')" -// or import * as pechkin from 'pechkin'; +// If 'pechkin' is installed as an NPM package, +// you can simply `const pechkin = require('pechkin')` +// or `import * as pechkin from 'pechkin';` + +// Use the dist/esm distribution if you're using ESM modules (import) const pechkin = require("../dist/cjs"); const s3Client = new S3Client({ @@ -44,15 +47,22 @@ http for await (const { filename: originalFilename, stream, field } of files) { const key = `${i}-${originalFilename}`; - batch.push( - uploadFileToS3(key, stream) - .then(({ Location }) => ({ field, originalFilename, location: Location })) - ); + results.push( + await uploadFileToS3(key, stream) + .then(({ Location }) => ({ + field, + originalFilename, + location: Location, + // Here, we expect the stream to be fully consumed by `uploadFileToS3()`, + // so there's no need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream – bytesWritten already reached its final value. - if (batch.length === 3) { - results.push(await Promise.all(batch)); - batch = []; // restart batch - } + // This is in contrast with the example in `examples/fs.js`, where we + // need to wait for the 'end'/'finish' events to obtain the correct + // byte length of the stream. 
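+            // (A possible extra check, not in the original example: with
+            // `abortOnFileByteLengthLimit: false`, `stream.truncated` tells you
+            // whether this upload was cut off at `maxFileByteLength`, and
+            // `stream.bytesRead - stream.bytesWritten` is how many bytes were
+            // dropped, so you could flag or delete such objects after uploading.)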
+ length: stream.bytesWritten, + })) + ); i++; } diff --git a/src/ByteLengthTruncateStream.ts b/src/ByteLengthTruncateStream.ts index cbd7412..5ed7f84 100644 --- a/src/ByteLengthTruncateStream.ts +++ b/src/ByteLengthTruncateStream.ts @@ -1,10 +1,15 @@ -import { Readable, Transform, TransformCallback } from 'stream'; +import { Transform, TransformCallback } from 'stream'; import { FieldLimitError } from './error'; export class ByteLengthTruncateStream extends Transform { + private _bytesRead: number = 0; private _bytesWritten: number = 0; private _truncated: boolean = false; + get bytesRead(): number { + return this._bytesRead; + } + get bytesWritten(): number { return this._bytesWritten; } @@ -13,23 +18,6 @@ export class ByteLengthTruncateStream extends Transform { return this._truncated; } - public on(event: 'byteLength', listener: (bytesWritten: number) => void): this; - public on(event: 'close', listener: () => void): this; - public on(event: 'data', listener: (chunk: any) => void): this; - public on(event: 'end', listener: () => void): this; - public on(event: 'error', listener: (err: Error) => void): this; - public on(event: 'pause', listener: () => void): this; - public on(event: 'readable', listener: () => void): this; - public on(event: 'resume', listener: () => void): this; - public on(event: 'close', listener: () => void): this; - public on(event: 'drain', listener: () => void): this; - public on(event: 'finish', listener: () => void): this; - public on(event: 'pipe', listener: (src: Readable) => void): this; - public on(event: 'unpipe', listener: (src: Readable) => void): this; - public on(event: string | symbol, listener: (...args: any[]) => void): this { - return super.on(event, listener); - } - constructor( private readonly limit: number, private readonly abortOnFileByteLengthLimit: boolean, @@ -40,13 +28,15 @@ export class ByteLengthTruncateStream extends Transform { // encoding = 'buffer': https://nodejs.org/api/stream.html#transform_transformchunk-encoding-callback public _transform(chunk: Buffer | string, encoding: BufferEncoding | 'buffer', callback: TransformCallback): void { - if (this._truncated) { - return callback(); - } - const chunkBuffer = encoding === 'buffer' ? chunk as Buffer : Buffer.from(chunk as string, encoding); + + this._bytesRead += chunkBuffer.byteLength; + + if (this._truncated) { + return callback(); + } if (this._bytesWritten + chunkBuffer.byteLength > this.limit) { const truncatedChunk = chunkBuffer.subarray(0, this.limit - this._bytesWritten); @@ -66,15 +56,4 @@ export class ByteLengthTruncateStream extends Transform { return callback(); } } - - public _flush(callback: TransformCallback): void { - this.emit('byteLength', this._bytesWritten); - - return callback(); - } -} - -export type FileByteLengthInfo = { - readBytes: number; - truncated: boolean; -}; \ No newline at end of file +} \ No newline at end of file diff --git a/src/FileIterator.ts b/src/FileIterator.ts index 631164e..57d64ad 100644 --- a/src/FileIterator.ts +++ b/src/FileIterator.ts @@ -1,7 +1,7 @@ import busboy from "busboy"; import { on, Readable } from "stream"; -import { ByteLengthTruncateStream, FileByteLengthInfo } from "./ByteLengthTruncateStream"; +import { ByteLengthTruncateStream } from "./ByteLengthTruncateStream"; import { TotalLimitError } from "./error"; import { FileCounter } from "./FileCounter"; import { Internal } from "./types"; @@ -126,51 +126,10 @@ function processBusboyFileEventPayload( return { field, stream: truncatedStream, - // Why getter and not a property? 
- // So that byteLength() is called lazily. byteLength() has a side effect: it sets - // the 'error' event listener on the stream. If we 'prematurely' set the 'error' event listener - // as a part of byteLength property, but don't error-handle byteLength, we will get silenced errors. - // https://github.com/rafasofizada/pechkin/issues/2 - get byteLength() { - return byteLength(truncatedStream) - }, ...info, }; } -export function byteLength(stream: ByteLengthTruncateStream): Promise { - return new Promise((resolve, reject) => { - let eventBeforeCloseEmitted = false; - - const payload = (stream: ByteLengthTruncateStream) => ({ - readBytes: stream.bytesWritten, - truncated: stream.truncated, - }); - - stream - .on('error', (error) => { - eventBeforeCloseEmitted = true; - return reject(error); - }) - .on('close', () => { - if (!eventBeforeCloseEmitted) { - console.warn(`Pechkin::ByteLengthLimitStream closed, without 'finish' & 'byteLength' or 'error' events -firing beforehand. This should not happen. Probable cause: stream was destroy()'ed.`); - } - - resolve(payload(stream)); - }) - .on('finish', () => { - eventBeforeCloseEmitted = true; - return resolve({ - readBytes: stream.bytesWritten, - truncated: stream.truncated, - }); - }) - .on('byteLength', () => resolve(payload(stream))); - }); -} - /* > eIter = events.on(target, event, handler); diff --git a/src/types.ts b/src/types.ts index 5216894..3c7037f 100644 --- a/src/types.ts +++ b/src/types.ts @@ -1,15 +1,12 @@ import * as busboy from "busboy"; -import { Readable } from "stream"; - -import { FileByteLengthInfo } from "./ByteLengthTruncateStream"; +import { ByteLengthTruncateStream } from "./ByteLengthTruncateStream"; export namespace Internal { export type Fields = Record; export type File = busboy.FileInfo & { field: string; - byteLength: Promise; - stream: Readable; + stream: ByteLengthTruncateStream; }; export type Files = Required>; diff --git a/test/ByteLengthLimitStream.spec.ts b/test/ByteLengthLimitStream.spec.ts index e4e660c..7889caf 100644 --- a/test/ByteLengthLimitStream.spec.ts +++ b/test/ByteLengthLimitStream.spec.ts @@ -1,166 +1,45 @@ -import { Readable } from 'stream'; import { describe, it, expect } from 'vitest'; import { FieldLimitError } from '../src'; import { ByteLengthTruncateStream } from '../src/ByteLengthTruncateStream'; -import { byteLength } from '../src/FileIterator'; describe('ByteLengthTruncateStream', () => { - describe('_transform', () => { - it('should pass through chunk if limit is not exceeded', () => new Promise((resolve, reject) => { - const limit = 100; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is a short chunk of data.'; - - stream.on('data', (chunk) => { - expect(chunk.toString()).toBe(data); - resolve(); - }); - - stream.write(data); - })); - - it('should truncate chunk and emit error if limit is exceeded', () => new Promise((resolve, reject) => { - const limit = 10; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is a long chunk of data.'; - const expectedTruncatedData = 'This is a '; - const expectedError = new FieldLimitError('maxFileByteLength', field, limit); - - const chunks: any[] = []; - - stream.on('data', (chunk) => { - chunks.push(chunk); - }); - - stream.on('error', (error) => { - expect(error).toEqual(expectedError); - expect(Buffer.concat(chunks).toString()).toEqual(expectedTruncatedData); - resolve(); - }); - - stream.write(data); - })); - 
}); + it('should pass through chunk if limit is not exceeded', () => new Promise((resolve, reject) => { + const limit = 100; + const field = 'content'; + const stream = new ByteLengthTruncateStream(limit, true, field); - describe('byteLength event', () => { - it('should return the correct byte length', () => new Promise((resolve, reject) => { - const limit = 100; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is some data.'; - const expectedByteLength = Buffer.byteLength(data); - - stream - .on('byteLength', (byteLength) => { - expect(byteLength).toEqual(expectedByteLength); - resolve(); - }) - .on('error', (error) => { - reject(new Error('No error should be emitted: content < limit')); - console.error(error); - }) - .on('finish', () => { - reject(new Error('\'byteLength\' should be emitted before \'finish\'')); - }); - - stream.write(data); - stream.end(); - })); - - it('should not be emitted if limit exceeded', () => new Promise((resolve, reject) => { - const limit = 10; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is some data.'; - - stream.write(data); - stream.end(); - - stream - .on('byteLength', (byteLength) => { - reject(new Error('\'byteLength\' should not be emitted: content > limit, should error instead')) - }) - .on('error', (error) => { - expect(error).toEqual(new FieldLimitError('maxFileByteLength', field, limit)); - resolve(); - }); - })); - - it('should not set .on(\'error\') listener and catch errors, process.uncaughtException should fire', () => new Promise((resolve, reject) => { - const uncaughtExceptionEvents: Error[] = []; - process.on('uncaughtException', (error: Error) => { - uncaughtExceptionEvents.push(error); - }); - - // Create the ByteLengthTruncateStream instance - const limit = 10; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - // Create a readable stream and pipe it to ByteLengthTruncateStream - const input = 'This is a long chunk of data.'; - const readableStream = Readable.from(input); - readableStream.pipe(stream); - - // Allow some time for the event loop to process data - setTimeout(() => { - try { - // Assert that the process.uncaughtException event was fired - expect(uncaughtExceptionEvents.length).toBeGreaterThan(0); - } catch (error) { - reject(error); - } - - process.removeAllListeners('uncaughtException'); - resolve(); - }, 500); - })); - - // Verifying expectations for stream behaviour - - it('is not available after the stream has been destroyed', () => new Promise((resolve, reject) => { - const limit = 100; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is some data.'; - - stream.write(data); - stream.destroy(); - - stream - .on('byteLength', () => { - reject(new Error('\'byteLength\' should not be emitted: stream destroyed')) - }) - .on('close', () => { - resolve(); - }); - })); - - it('is not available after the stream has been ended', () => new Promise((resolve, reject) => { - const limit = 100; - const field = 'content'; - const stream = new ByteLengthTruncateStream(limit, true, field); - - const data = 'This is some data.'; - - stream.write(data); - stream.end(); + const data = 'This is a short chunk of data.'; + + stream.on('data', (chunk) => { + expect(chunk.toString()).toBe(data); + resolve(); + }); + + stream.write(data); + })); - stream - .on('byteLength', () => { - reject(new 
Error('\'byteLength\' should not be emitted: stream destroyed')) - }) - .on('finish', () => { - resolve(); - }); - })); - }); + it('should truncate chunk and emit error if limit is exceeded', () => new Promise((resolve, reject) => { + const limit = 10; + const field = 'content'; + const stream = new ByteLengthTruncateStream(limit, true, field); + + const data = 'This is a long chunk of data.'; + const expectedTruncatedData = 'This is a '; + const expectedError = new FieldLimitError('maxFileByteLength', field, limit); + + const chunks: any[] = []; + + stream.on('data', (chunk) => { + chunks.push(chunk); + }); + + stream.on('error', (error) => { + expect(error).toEqual(expectedError); + expect(Buffer.concat(chunks).toString()).toEqual(expectedTruncatedData); + resolve(); + }); + + stream.write(data); + })); }); diff --git a/test/files.spec.ts b/test/files.spec.ts index a9e4ea4..1074a4f 100644 --- a/test/files.spec.ts +++ b/test/files.spec.ts @@ -44,37 +44,37 @@ describe('Files', () => { expect.objectContaining({ field: 'truncateAll', content: 'truncated', - byteLength: { truncated: true, readBytes: 9 }, + byteLength: { truncated: true, bytesWritten: 9, bytesRead: 13 }, }), expect.objectContaining({ field: 'truncateAll', content: 'truncated', - byteLength: { truncated: true, readBytes: 9 }, + byteLength: { truncated: true, bytesWritten: 9, bytesRead: 13 }, }), expect.objectContaining({ field: 'truncateSome', content: 'not trunc', - byteLength: { truncated: false, readBytes: 9 }, + byteLength: { truncated: false, bytesWritten: 9, bytesRead: 9 }, }), expect.objectContaining({ field: 'truncateSome', content: 'truncated', - byteLength: { truncated: true, readBytes: 9 }, + byteLength: { truncated: true, bytesWritten: 9, bytesRead: 13 }, }), expect.objectContaining({ field: 'truncateSingle', content: 'truncated', - byteLength: { truncated: true, readBytes: 9 }, + byteLength: { truncated: true, bytesWritten: 9, bytesRead: 13 }, }), expect.objectContaining({ field: 'noTruncation', content: 'not trunc', - byteLength: { truncated: false, readBytes: 9 }, + byteLength: { truncated: false, bytesWritten: 9, bytesRead: 9 }, }), expect.objectContaining({ field: 'noTruncation', content: 'no trunca', - byteLength: { truncated: false, readBytes: 9 }, + byteLength: { truncated: false, bytesWritten: 9, bytesRead: 9 }, }), ]); }); @@ -100,17 +100,17 @@ describe('Files', () => { expect.objectContaining({ field: 'dontTruncate', content: 'should not be truncated', - byteLength: { truncated: false, readBytes: 23 }, + byteLength: { truncated: false, bytesWritten: 23, bytesRead: 23 }, }), expect.objectContaining({ field: 'truncate', content: 'should be', - byteLength: { truncated: true, readBytes: 9 }, + byteLength: { truncated: true, bytesWritten: 9, bytesRead: 19 }, }), expect.objectContaining({ field: 'truncateLonger', content: 'should be trunc', - byteLength: { truncated: true, readBytes: 15 }, + byteLength: { truncated: true, bytesWritten: 15, bytesRead: 19 }, }), ]); }); diff --git a/test/util.ts b/test/util.ts index bf47d51..ab73ef0 100644 --- a/test/util.ts +++ b/test/util.ts @@ -6,13 +6,15 @@ import { expect } from 'vitest'; import { parseFormData, Pechkin } from '../src'; import { Internal } from '../src/types'; -import { FileByteLengthInfo } from '../src/ByteLengthTruncateStream'; -process.on("uncaughtException", (error) => { - console.log('UNCAUGHT 😱', error); -}); - -export type TestFile = Omit & { content: string | null; byteLength: FileByteLengthInfo; }; +export type TestFile = Omit & { + content: string 
| null; + byteLength: { + bytesWritten: number; + bytesRead: number; + truncated: boolean; + }; +}; export type TestFormDataFields = `${S}__file` | `${S}__field`; export type TestFormDataPayload = Record; export type TestFormDataParseResult = { fields: Internal.Fields, files: TestFile[] }; @@ -55,13 +57,15 @@ export async function createParseFormData( const results = [] as TestFile[]; - for await (const { stream, byteLength, ...restFile } of files) { + for await (const { stream, ...restFile } of files) { const result = { ...restFile, - content: stream - ? await streamToString(stream) - : null, - byteLength: await byteLength, + content: await streamToString(stream), + byteLength: { + bytesWritten: stream.bytesWritten, + bytesRead: stream.bytesRead, + truncated: stream.truncated, + }, }; results.push(result);