`concurrent_session_invalid_data_size` when uploading files with the Dropbox API

When uploading a file to the Dropbox API with the filesUploadSessionAppendV2 endpoint, I kept getting a concurrent_session_invalid_data_size error whenever the file was larger than CHUNK_SIZE.

Here's roughly what my Node code for uploading the file looked like:

// readBytes and generateChunks adapted from https://betterprogramming.pub/a-memory-friendly-way-of-reading-files-in-node-js-a45ad0cc7bb6
const fs = require("fs");

// fill sharedBuffer with the next sharedBuffer.length bytes of the file
function readBytes(fd, sharedBuffer) {
  return new Promise((resolve, reject) => {
    fs.read(fd, sharedBuffer, 0, sharedBuffer.length, null, (err) => {
      if (err) return reject(err);
      resolve();
    });
  });
}

// yield the file as [chunk, progress] pairs of at most `size` bytes each
async function* generateChunks(filePath, size) {
  const sharedBuffer = Buffer.alloc(size);
  const stats = fs.statSync(filePath); // file details
  const fd = fs.openSync(filePath); // file descriptor
  let bytesRead = 0;
  let end = size;
  for (let i = 0; i < Math.ceil(stats.size / size); i++) {
    await readBytes(fd, sharedBuffer);
    bytesRead = (i + 1) * size;
    // the final chunk is usually shorter than `size`
    if (bytesRead > stats.size) end = size - (bytesRead - stats.size);
    yield [
      sharedBuffer.slice(0, end),
      { bytesRead, size: stats.size, done: bytesRead >= stats.size },
    ];
  }
  fs.closeSync(fd);
}

// make sure this is a multiple of 4MB
const CHUNK_SIZE = 40 * 1000000;

// get session_ids (client is an authenticated Dropbox instance from the dropbox package)
const { result: { session_ids } } = await client.filesUploadSessionStartBatch({
  num_sessions: 1,
});
const session_id = session_ids[0];

// upload chunks
const src = "file.zip";
let offset = 0;
for await (const [chunk, progress] of generateChunks(src, CHUNK_SIZE)) {
  await client.filesUploadSessionAppendV2({
    contents: chunk,
    cursor: {
      session_id,
      offset,
    },
    close: progress.done,
  });
  if (progress.done) offset = progress.size;
  else offset += CHUNK_SIZE;
}

// finalize
await client.filesUploadSessionFinishBatchV2({
  entries: [
    { cursor: { session_id, offset }, commit: { path: "/file.zip", mode: "add", autorename: true } },
  ],
});
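If you're debugging the same thing, the SDK does tell you which call failed if you catch the response error around the append. A minimal sketch, assuming the official dropbox npm package, whose thrown errors carry the HTTP status and the raw API error body:

try {
  await client.filesUploadSessionAppendV2({ /* ... as above ... */ });
} catch (err) {
  // DropboxResponseError: .status is the HTTP code, .error the API's error body
  console.error(err.status, JSON.stringify(err.error, null, 2));
  throw err;
}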

It turns out concurrent_session_invalid_data_size means "hey, you're not sending this chunk as a multiple of 4MB".
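In hindsight, a small assertion in the upload loop would have surfaced this immediately. A minimal sketch, assuming the 4194304-byte (4 MiB) granularity from the Dropbox docs; assertChunkSize is a hypothetical helper, not part of the SDK:

// hypothetical guard: in a concurrent upload session, every chunk except
// the last must be an exact multiple of 4 MiB (4194304 bytes)
const DROPBOX_GRANULARITY = 4 * Math.pow(2, 20); // 4194304

function assertChunkSize(chunk, isLast) {
  if (!isLast && chunk.length % DROPBOX_GRANULARITY !== 0) {
    throw new Error(`chunk is ${chunk.length} bytes, not a multiple of ${DROPBOX_GRANULARITY}`);
  }
}

// e.g. assertChunkSize(chunk, progress.done) right before filesUploadSessionAppendV2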

Also known as: 1MB !== 1000000 bytes. 1MB === Math.pow(2, 20) === 1048576 bytes (strictly a mebibyte, which is what Dropbox's "4MB" means here).
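The quick arithmetic check, with 4194304 bytes being the 4 MiB granularity in question:

const FOUR_MIB = 4 * Math.pow(2, 20); // 4194304

(40 * 1000000) % FOUR_MIB;         // 2251264 -> not a multiple, rejected
(40 * Math.pow(2, 20)) % FOUR_MIB; // 0 -> exactly 10 x 4 MiB per append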

The only change I needed to make:

- const CHUNK_SIZE = 40 * 1000000; 
+ const CHUNK_SIZE = 40 * Math.pow(2, 20);

🤦‍♂️
