Goal: upload large files to AWS Glacier without holding the whole file in memory.
I'm currently uploading to Glacier using fs.readFileSync() and things are working. However, I need to handle files larger than 4GB, and I'd like to upload multiple chunks in parallel. That means moving to multipart uploads. I can choose the chunk size, but Glacier requires every chunk to be the same size (except the last one).
This thread suggests I can set a chunk size on a read stream, but I'm not actually guaranteed to get it.
Any info on how I can get consistent parts without reading the whole file into memory and splitting it up manually?
Assuming I can do that, I was just going to use cluster, with a few processes pulling chunks off the stream as fast as they can upload them to AWS. If that seems like the wrong way to parallelize the work, I'd love suggestions there too.
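To make the plan concrete, here is roughly what I have in mind. This is only a sketch: PART_SIZE, the worker count, and the part-index math are my own placeholders, and the actual Glacier upload call is omitted.
var cluster = require('cluster');
var fs = require('fs');

var PART_SIZE = 4 * 1024 * 1024; // must be identical for every part except the last
var filePath = '/tmp/foo';
var fileSize = fs.statSync(filePath).size;
var partCount = Math.ceil(fileSize / PART_SIZE);
var numWorkers = 4;

if (cluster.isMaster) {
  // fork workers; worker w takes parts w, w + numWorkers, w + 2*numWorkers, ...
  for (var w = 0; w < numWorkers; w++) {
    cluster.fork({ WORKER_INDEX: String(w) });
  }
} else {
  var workerIndex = Number(process.env.WORKER_INDEX);
  var fd = fs.openSync(filePath, 'r');
  for (var i = workerIndex; i < partCount; i += numWorkers) {
    var position = i * PART_SIZE;
    var length = Math.min(PART_SIZE, fileSize - position);
    var buffer = Buffer.alloc(length);
    fs.readSync(fd, buffer, 0, length, position);
    // the Glacier uploadMultipartPart call for part `i` would go here
    console.log('worker %d read part %d (%d bytes)', workerIndex, i, length);
  }
  fs.closeSync(fd);
  process.exit(0);
}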
If nothing else, you can use fs.open(), fs.read(), and fs.close() manually. Example:
var fs = require('fs');

var CHUNK_SIZE = 10 * 1024 * 1024, // 10MB
    buffer = Buffer.alloc(CHUNK_SIZE),
    filePath = '/tmp/foo';

fs.open(filePath, 'r', function(err, fd) {
  if (err) throw err;
  function readNextChunk() {
    fs.read(fd, buffer, 0, CHUNK_SIZE, null, function(err, nread) {
      if (err) throw err;

      if (nread === 0) {
        // done reading file, do any necessary finalization steps
        fs.close(fd, function(err) {
          if (err) throw err;
        });
        return;
      }

      var data;
      if (nread < CHUNK_SIZE)
        data = buffer.slice(0, nread);
      else
        data = buffer;

      // do something with `data`, then call `readNextChunk();`
    });
  }
  readNextChunk();
});
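A note on the position argument above: passing null means "continue from the current file position", which is what makes the sequential loop work. For the multipart-upload case you can instead pass an explicit offset, so each part can be read independently and is guaranteed to be exactly CHUNK_SIZE bytes (except the last). A sketch reusing fs and CHUNK_SIZE from the example above; the part-index math is my assumption, not part of the original answer:
// read part `i` at an explicit offset instead of sequentially
function readPartAt(fd, i, callback) {
  var buf = Buffer.alloc(CHUNK_SIZE);
  fs.read(fd, buf, 0, CHUNK_SIZE, i * CHUNK_SIZE, function(err, nread) {
    if (err) return callback(err);
    callback(null, nread < CHUNK_SIZE ? buf.slice(0, nread) : buf);
  });
}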
You can consider the snippet below, where we read the file in chunks of 1024 bytes:
var fs = require('fs');
var data = '';

var readStream = fs.createReadStream('/tmp/foo.txt', { highWaterMark: 1 * 1024, encoding: 'utf8' });

readStream.on('data', function(chunk) {
  data += chunk;
  console.log('chunk Data : ');
  console.log(chunk); // your processing chunk logic will go here
}).on('end', function() {
  console.log('###################');
  console.log(data);
  // here you see all data processed at end of file
});
Please note: highWaterMark is the parameter used for the chunk size. Hope this helps!
Web reference: https://stackabuse.com/read-files-with-node-js/
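One caveat worth adding (my note, not from the linked article): highWaterMark is an upper bound on the chunk size, not a guarantee, so 'data' events can deliver less. If you need exact, equal-sized chunks, as in the question, you have to re-buffer the stream yourself. A sketch:
var fs = require('fs');

var CHUNK_SIZE = 1024;
var pending = Buffer.alloc(0);
var stream = fs.createReadStream('/tmp/foo.txt', { highWaterMark: CHUNK_SIZE });

stream.on('data', function(chunk) {
  pending = Buffer.concat([pending, chunk]);
  while (pending.length >= CHUNK_SIZE) {
    var exact = pending.subarray(0, CHUNK_SIZE); // exactly CHUNK_SIZE bytes
    pending = pending.subarray(CHUNK_SIZE);
    // hand `exact` to your chunk-processing logic here
  }
}).on('end', function() {
  if (pending.length > 0) {
    // final, possibly shorter chunk
    console.log('final chunk of %d bytes', pending.length);
  }
});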
Based on mscdex's answer, here is a module providing a synchronous alternative, which uses StringDecoder to correctly parse UTF-8.
The problem with readableStream is that in order to use it, you have to convert the whole project to using async emitters & callbacks. If you're coding something simple, like a small CLI in nodejs, it doesn't make sense.
//usage
let file = new UTF8FileReader()
file.open('./myfile.txt', 1024)
while ( file.isOpen ) {
    let stringData = file.readChunk()
    console.log(stringData)
}
//--------------------
// UTF8FileReader.ts
//--------------------
import * as fs from 'fs';
import { StringDecoder, NodeStringDecoder } from "string_decoder";
export class UTF8FileReader {

    filename: string;
    isOpen: boolean = false;
    private chunkSize: number;
    private fd: number; //file handle from fs.openSync
    private readFilePos: number;
    private readBuffer: Buffer;
    private utf8decoder: NodeStringDecoder

    /**
     * open the file | throw
     * @param filename
     */
    open(filename, chunkSize: number = 16 * 1024) {

        this.chunkSize = chunkSize;

        try {
            this.fd = fs.openSync(filename, 'r');
        }
        catch (e) {
            throw new Error("opening " + filename + ", error:" + e.toString());
        }

        this.filename = filename;
        this.isOpen = true;

        this.readBuffer = Buffer.alloc(this.chunkSize);
        this.readFilePos = 0;

        //a StringDecoder is a buffered object that ensures complete UTF-8 multibyte decoding from a byte buffer
        this.utf8decoder = new StringDecoder('utf8')
    }

    /**
     * read another chunk from the file
     * return the decoded UTF8 into a string
     * (or throw)
     * */
    readChunk(): string {

        let decodedString = '' //return '' by default

        if (!this.isOpen) {
            return decodedString;
        }

        let readByteCount: number;
        try {
            readByteCount = fs.readSync(this.fd, this.readBuffer, 0, this.chunkSize, this.readFilePos);
        }
        catch (e) {
            throw new Error("reading " + this.filename + ", error:" + e.toString());
        }

        if (readByteCount) {
            //some data read, advance readFilePos
            this.readFilePos += readByteCount;
            //get only the read bytes (if we reached the end of the file)
            const onlyReadBytesBuf = this.readBuffer.slice(0, readByteCount);
            //correctly decode as utf8, and store in decodedString
            //yes, the api is called "write", but it decodes a string - it's a write-decode-and-return the string kind-of-thing :)
            decodedString = this.utf8decoder.write(onlyReadBytesBuf);
        }
        else {
            //read returns 0 => all bytes read
            this.close();
        }

        return decodedString
    }

    close() {
        if (!this.isOpen) {
            return;
        }
        fs.closeSync(this.fd);
        this.isOpen = false;
        this.utf8decoder.end();
    }
}
If you don't have typescript yet, here is the compiled .js code:
// UTF8FileReader.js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.UTF8FileReader = void 0;
//--------------------
// UTF8FileReader
//--------------------
const fs = require("fs");
const string_decoder_1 = require("string_decoder");
class UTF8FileReader {
    constructor() {
        this.isOpen = false;
    }
    /**
     * open the file | throw
     * @param filename
     */
    open(filename, chunkSize = 16 * 1024) {
        this.chunkSize = chunkSize;
        try {
            this.fd = fs.openSync(filename, 'r');
        }
        catch (e) {
            throw new Error("opening " + filename + ", error:" + e.toString());
        }
        this.filename = filename;
        this.isOpen = true;
        this.readBuffer = Buffer.alloc(this.chunkSize);
        this.readFilePos = 0;
        //a StringDecoder is a buffered object that ensures complete UTF-8 multibyte decoding from a byte buffer
        this.utf8decoder = new string_decoder_1.StringDecoder('utf8');
    }
    /**
     * read another chunk from the file
     * return the decoded UTF8 into a string
     * (or throw)
     * */
    readChunk() {
        let decodedString = ''; //return '' by default
        if (!this.isOpen) {
            return decodedString;
        }
        let readByteCount;
        try {
            readByteCount = fs.readSync(this.fd, this.readBuffer, 0, this.chunkSize, this.readFilePos);
        }
        catch (e) {
            throw new Error("reading " + this.filename + ", error:" + e.toString());
        }
        if (readByteCount) {
            //some data read, advance readFilePos
            this.readFilePos += readByteCount;
            //get only the read bytes (if we reached the end of the file)
            const onlyReadBytesBuf = this.readBuffer.slice(0, readByteCount);
            //correctly decode as utf8, and store in decodedString
            //yes, the api is called "write", but it decodes a string - it's a write-decode-and-return the string kind-of-thing :)
            decodedString = this.utf8decoder.write(onlyReadBytesBuf);
        }
        else {
            //read returns 0 => all bytes read
            this.close();
        }
        return decodedString;
    }
    close() {
        if (!this.isOpen) {
            return;
        }
        fs.closeSync(this.fd);
        this.isOpen = false;
        this.utf8decoder.end();
    }
}
exports.UTF8FileReader = UTF8FileReader;
I suggest doing it this way, because buffer.slice is deprecated and, for me, it had problems when reading large PDF files.
import {promisify} from 'node:util';
import fs from 'node:fs';
import {Buffer} from 'node:buffer';
import pify from 'pify';
const fsReadP = pify(fs.read, {multiArgs: true});
const fsOpenP = promisify(fs.open);
const fsCloseP = promisify(fs.close);
export async function readChunk(filePath, {length, startPosition}) {
    const fileDescriptor = await fsOpenP(filePath, 'r');

    try {
        let [bytesRead, buffer] = await fsReadP(fileDescriptor, {
            buffer: Buffer.alloc(length),
            length,
            position: startPosition,
        });

        if (bytesRead < length) {
            buffer = buffer.subarray(0, bytesRead);
        }

        return buffer;
    } finally {
        await fsCloseP(fileDescriptor);
    }
}

export function readChunkSync(filePath, {length, startPosition}) {
    let buffer = Buffer.alloc(length);
    const fileDescriptor = fs.openSync(filePath, 'r');

    try {
        const bytesRead = fs.readSync(fileDescriptor, buffer, {
            length,
            position: startPosition,
        });

        if (bytesRead < length) {
            buffer = buffer.subarray(0, bytesRead);
        }

        return buffer;
    } finally {
        fs.closeSync(fileDescriptor);
    }
}
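Usage would look something like this (the module path and file name here are just for illustration):
// usage-example.mjs (hypothetical file layout; ESM, so top-level await works)
import {readChunk, readChunkSync} from './read-chunk.js';

// read 10 bytes starting at byte 0
const chunk = await readChunk('/tmp/foo.txt', {length: 10, startPosition: 0});
console.log(chunk.length); // 10, or fewer if the file is shorter

// the synchronous variant behaves the same way
const sameChunk = readChunkSync('/tmp/foo.txt', {length: 10, startPosition: 0});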