This commit is contained in:
ditiqi 2025-05-29 11:12:16 +08:00
parent 047e1fb80a
commit 89a633152c
41 changed files with 729 additions and 4428 deletions

View File

@ -11,6 +11,7 @@
},
"dependencies": {
"@aws-sdk/client-s3": "^3.723.0",
"@aws-sdk/s3-request-presigner": "^3.817.0",
"@hono/zod-validator": "^0.5.0",
"@repo/db": "workspace:*",
"@shopify/semaphore": "^3.1.0",
@ -26,12 +27,12 @@
"zod": "^3.25.23"
},
"devDependencies": {
"@redis/client": "^1.6.0",
"@types/debug": "^4.1.12",
"@types/lodash.throttle": "^4.1.9",
"@types/multistream": "^4.1.3",
"@types/node": "^22.15.21",
"typescript": "^5.0.0"
},
"peerDependencies": {
"@repo/db": "workspace:*",

View File

@ -27,7 +27,6 @@ export function createStorageRoutes(basePath: string = '/api/storage') {
return c.json(result);
});
// Delete a resource
app.delete('/resource/:id', async (c) => {
const id = c.req.param('id');
@ -305,6 +304,7 @@ export function createFileDownloadRoutes(downloadPath: string = '/download') {
const encodedFileId = c.req.param('fileId');
const fileId = decodeURIComponent(encodedFileId);
console.log('=== DOWNLOAD DEBUG START ===');
console.log('Download request - Encoded fileId:', encodedFileId);
console.log('Download request - Decoded fileId:', fileId);
@ -314,9 +314,92 @@ export function createFileDownloadRoutes(downloadPath: string = '/download') {
// Fetch the file info from the database
const { status, resource } = await getResourceByFileId(fileId);
if (status !== 'UPLOADED' || !resource) {
console.log('Download - File not found, status:', status);
return c.json({ error: `File not found or not ready. Status: ${status}, FileId: ${fileId}` }, 404);
}
// Log the resource in detail
console.log('Download - Full resource object:', JSON.stringify(resource, null, 2));
console.log('Download - Resource title:', resource.title);
console.log('Download - Resource type:', resource.type);
console.log('Download - Resource fileId:', resource.fileId);
// Use resource.title as the download filename; fall back to a default name if it is missing
let downloadFileName = resource.title || 'download';
// Make sure the filename carries a proper extension
if (downloadFileName && !downloadFileName.includes('.') && resource.type) {
// No extension present; try to infer one from the MIME type
const mimeTypeToExt: Record<string, string> = {
// Microsoft Office
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': '.xlsx',
'application/vnd.openxmlformats-officedocument.presentationml.presentation': '.pptx',
'application/msword': '.doc',
'application/vnd.ms-excel': '.xls',
'application/vnd.ms-powerpoint': '.ppt',
// WPS Office
'application/wps-office.docx': '.docx',
'application/wps-office.xlsx': '.xlsx',
'application/wps-office.pptx': '.pptx',
'application/wps-office.doc': '.doc',
'application/wps-office.xls': '.xls',
'application/wps-office.ppt': '.ppt',
// Other document formats
'application/pdf': '.pdf',
'application/rtf': '.rtf',
'text/plain': '.txt',
'text/csv': '.csv',
'application/json': '.json',
'application/xml': '.xml',
'text/xml': '.xml',
// Image formats
'image/jpeg': '.jpg',
'image/jpg': '.jpg',
'image/png': '.png',
'image/gif': '.gif',
'image/bmp': '.bmp',
'image/webp': '.webp',
'image/svg+xml': '.svg',
'image/tiff': '.tiff',
// Audio formats
'audio/mpeg': '.mp3',
'audio/wav': '.wav',
'audio/ogg': '.ogg',
'audio/aac': '.aac',
'audio/flac': '.flac',
// Video formats
'video/mp4': '.mp4',
'video/avi': '.avi',
'video/quicktime': '.mov',
'video/x-msvideo': '.avi',
'video/webm': '.webm',
// Archive formats
'application/zip': '.zip',
'application/x-rar-compressed': '.rar',
'application/x-7z-compressed': '.7z',
'application/gzip': '.gz',
'application/x-tar': '.tar',
// Other common formats
'application/octet-stream': '',
};
const extension = mimeTypeToExt[resource.type];
if (extension) {
downloadFileName += extension;
console.log('Download - Added extension from MIME type:', extension);
}
}
console.log('Download - Final download filename:', downloadFileName);
if (storageType === StorageType.LOCAL) {
// Local storage: read the file directly
const config = storageManager.getStorageConfig();
@ -332,11 +415,14 @@ export function createFileDownloadRoutes(downloadPath: string = '/download') {
// Check that the directory exists
if (!fs.existsSync(fileDir)) {
console.log('Download - Directory not found:', fileDir);
return c.json({ error: `File directory not found: ${fileDir}` }, 404);
}
// Read the directory and find the actual file (excluding .json files)
const files = fs.readdirSync(fileDir).filter((f) => !f.endsWith('.json'));
console.log('Download - Files in directory:', files);
if (files.length === 0) {
return c.json({ error: `No file found in directory: ${fileDir}` }, 404);
}
@ -348,45 +434,101 @@ export function createFileDownloadRoutes(downloadPath: string = '/download') {
}
const filePath = path.join(fileDir, actualFileName);
console.log('Download - Actual file in directory:', actualFileName);
console.log('Download - Full file path:', filePath);
// Get the file stats
const stats = fs.statSync(filePath);
const fileSize = stats.size;
// Force the correct MIME type
let contentType = resource.type || 'application/octet-stream';
if (downloadFileName.endsWith('.docx')) {
contentType = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document';
} else if (downloadFileName.endsWith('.xlsx')) {
contentType = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet';
} else if (downloadFileName.endsWith('.pdf')) {
contentType = 'application/pdf';
}
console.log('Download - Final Content-Type:', contentType);
// Handle non-ASCII filenames - now using the proper RFC 2231 format
let contentDisposition: string;
const hasNonAscii = !/^[\x00-\x7F]*$/.test(downloadFileName);
if (hasNonAscii) {
// The filename contains non-ASCII (e.g. Chinese) characters; use the RFC 2231 form
const encodedFileName = encodeURIComponent(downloadFileName);
// Provide both an ASCII fallback and the UTF-8 encoded version
const fallbackName = downloadFileName.replace(/[^\x00-\x7F]/g, '_');
contentDisposition = `attachment; filename="${fallbackName}"; filename*=UTF-8''${encodedFileName}`;
console.log('Download - Original filename:', downloadFileName);
console.log('Download - Encoded filename:', encodedFileName);
console.log('Download - Fallback filename:', fallbackName);
} else {
// Plain ASCII filename; the simple form is enough
contentDisposition = `attachment; filename="${downloadFileName}"`;
}
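// Note: encodeURIComponent leaves ' ( ) * unescaped, which the RFC 5987 attr-char
// rules used by filename* require percent-encoding for. A stricter encoder
// (sketch, assuming that behavior is wanted here) would be:
//   function encodeRFC5987(value: string): string {
//     return encodeURIComponent(value).replace(
//       /['()*]/g,
//       (c) => '%' + c.charCodeAt(0).toString(16).toUpperCase(),
//     );
//   }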
// Set all the required response headers
c.header('Content-Type', contentType);
c.header('Content-Length', fileSize.toString());
c.header('Content-Disposition', contentDisposition);
// Extra headers to make sure browsers handle the download correctly
c.header('Cache-Control', 'no-cache, no-store, must-revalidate');
c.header('Pragma', 'no-cache');
c.header('Expires', '0');
console.log('Download - Content-Disposition:', contentDisposition);
console.log('=== DOWNLOAD DEBUG END ===');
// Return the file stream - using the approach Hono expects
const fileStream = fs.createReadStream(filePath);
// Convert the Node.js ReadStream into a Web ReadableStream
const readableStream = new ReadableStream({
start(controller) {
fileStream.on('data', (chunk) => {
controller.enqueue(chunk);
});
fileStream.on('end', () => {
controller.close();
});
fileStream.on('error', (error) => {
controller.error(error);
});
},
});
return new Response(readableStream, {
status: 200,
headers: {
'Content-Type': contentType,
'Content-Length': fileSize.toString(),
'Content-Disposition': contentDisposition,
'Cache-Control': 'no-cache, no-store, must-revalidate',
Pragma: 'no-cache',
Expires: '0',
},
});
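// Note: on Node.js >= 17 the manual wrapper above can be replaced by the
// built-in converter (sketch; equivalence for this use case assumed):
//   import { Readable } from 'node:stream';
//   return new Response(Readable.toWeb(fileStream) as ReadableStream, { status: 200, headers: { /* as above */ } });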
} catch (error) {
console.error('Error reading local file:', error);
return c.json({ error: 'Failed to read file' }, 500);
}
} else if (storageType === StorageType.S3) {
// S3 storage: redirect and let S3 serve the file under its own name
const dataStore = storageManager.getDataStore();
try {
// For S3 storage the full path has to be rebuilt from the fileId.
// S3Store's client is private, so probe the connection via getUpload first.
await (dataStore as any).getUpload(fileId + '/dummy'); // This will fail, but it verifies connectivity
} catch (error: any) {
// Any error other than FILE_NOT_FOUND means the connection itself is broken
if (error.message && !error.message.includes('FILE_NOT_FOUND')) {
console.error('S3 connection error:', error);
return c.json({ error: 'Failed to access S3 storage' }, 500);
}
}
// Build the S3 URL - reconstruct the full path from the resource info
// Here we assume the file name is resource.title
const config = storageManager.getStorageConfig();
const s3Config = config.s3!;
// Build the S3 key from the fileId and the original file name
const fileName = resource.title || 'file';
const fullS3Key = `${fileId}/${fileName}`;
console.log('Download - S3 Key:', fullS3Key);
// Generate the S3 URL
let s3Url: string;
if (s3Config.endpoint && s3Config.endpoint !== 'https://s3.amazonaws.com') {
@ -398,6 +540,7 @@ export function createFileDownloadRoutes(downloadPath: string = '/download') {
}
console.log(`Redirecting to S3 URL: ${s3Url}`);
console.log('=== DOWNLOAD DEBUG END ===');
// Redirect to the S3 URL
return c.redirect(s3Url, 302);
}
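// Note: a plain redirect only works when the bucket allows public reads. For a
// private bucket, the @aws-sdk/s3-request-presigner dependency added in this
// commit could mint a time-limited URL instead (sketch; the client wiring and
// the s3Config field names are assumptions):
//   import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3';
//   import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
//   const client = new S3Client({ region: s3Config.region, endpoint: s3Config.endpoint, forcePathStyle: true });
//   const command = new GetObjectCommand({ Bucket: s3Config.bucket, Key: fullS3Key });
//   const signedUrl = await getSignedUrl(client, command, { expiresIn: 3600 });
//   return c.redirect(signedUrl, 302);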

View File

@ -0,0 +1,261 @@
#!/usr/bin/env node
/**
 * MinIO configuration test script
 * Exercises the exact configuration supplied by the user
 */
const { S3 } = require('@aws-sdk/client-s3');
const fs = require('fs');
const path = require('path');
async function testMinIOConfig() {
console.log('🔍 Starting the MinIO configuration test...\n');
// User-provided configuration
const config = {
endpoint: 'http://localhost:9000',
region: 'us-east-1',
credentials: {
accessKeyId: '7Nt7OyHkwIoo3zvSKdnc',
secretAccessKey: 'EZ0cyrjJAsabTLNSqWcU47LURMppBW2kka3LuXzb',
},
forcePathStyle: true,
};
const bucketName = 'test123';
const uploadDir = '/opt/projects/nice/uploads';
console.log('📋 Configuration:');
console.log(`  Endpoint: ${config.endpoint}`);
console.log(`  Region: ${config.region}`);
console.log(`  Bucket: ${bucketName}`);
console.log(`  Upload Dir: ${uploadDir}`);
console.log(`  Access Key: ${config.credentials.accessKeyId}`);
console.log(`  Force Path Style: ${config.forcePathStyle}`);
console.log();
try {
const s3Client = new S3(config);
// 1. Basic connectivity and authentication
console.log('📡 Testing connectivity and authentication...');
try {
const buckets = await s3Client.listBuckets();
console.log('✅ Connectivity and authentication OK!');
console.log(`📂 Existing buckets: ${buckets.Buckets?.map((b) => b.Name).join(', ') || 'none'}`);
} catch (error) {
console.log('❌ Connection failed:', error.message);
if (error.message.includes('ECONNREFUSED')) {
console.log('💡 Hint: the MinIO service may not be running; check that localhost:9000 is reachable');
} else if (error.message.includes('Invalid')) {
console.log('💡 Hint: check that the access key and secret key are correct');
}
return false;
}
// 2. Check the target bucket
console.log(`\n🪣 Checking bucket "${bucketName}"...`);
let bucketExists = false;
try {
await s3Client.headBucket({ Bucket: bucketName });
console.log(`✅ Bucket "${bucketName}" exists and is accessible`);
bucketExists = true;
} catch (error) {
if (error.name === 'NotFound') {
console.log(`❌ Bucket "${bucketName}" does not exist`);
console.log('🔧 Attempting to create it...');
try {
await s3Client.createBucket({ Bucket: bucketName });
console.log(`✅ Bucket "${bucketName}" created`);
bucketExists = true;
} catch (createError) {
console.log(`❌ Failed to create the bucket: ${createError.message}`);
return false;
}
} else {
console.log(`❌ Error while checking the bucket: ${error.message}`);
return false;
}
}
if (!bucketExists) {
return false;
}
// 3. Check the upload directory
console.log(`\n📁 Checking upload directory "${uploadDir}"...`);
try {
if (!fs.existsSync(uploadDir)) {
console.log('📁 Upload directory missing, creating it...');
fs.mkdirSync(uploadDir, { recursive: true });
console.log('✅ Upload directory created');
} else {
console.log('✅ Upload directory exists');
}
} catch (error) {
console.log(`❌ Failed to check/create the upload directory: ${error.message}`);
}
// 4. Test a file upload
console.log('\n📤 Testing file upload...');
const testFileName = `test-upload-${Date.now()}.txt`;
const testContent = `This is a test file
Created at: ${new Date().toISOString()}
User: nice1234
MinIO test successful`;
try {
await s3Client.putObject({
Bucket: bucketName,
Key: testFileName,
Body: testContent,
ContentType: 'text/plain',
Metadata: {
'test-type': 'config-validation',
'created-by': 'test-script',
},
});
console.log(`✅ File uploaded: ${testFileName}`);
} catch (error) {
console.log(`❌ File upload failed: ${error.message}`);
console.log('Error details:', error);
return false;
}
// 5. Download the file back and verify it
console.log('\n📥 Verifying the download...');
try {
const result = await s3Client.getObject({
Bucket: bucketName,
Key: testFileName,
});
// Read the stream contents
const chunks = [];
for await (const chunk of result.Body) {
chunks.push(chunk);
}
const downloadedContent = Buffer.concat(chunks).toString();
if (downloadedContent === testContent) {
console.log('✅ Download verified; contents match');
} else {
console.log('❌ Downloaded contents do not match');
return false;
}
} catch (error) {
console.log(`❌ Download failed: ${error.message}`);
return false;
}
// 6. Test multipart upload support
console.log('\n🔄 Testing multipart uploads...');
const multipartKey = `multipart-test-${Date.now()}.dat`;
try {
const multipartUpload = await s3Client.createMultipartUpload({
Bucket: bucketName,
Key: multipartKey,
Metadata: {
'test-type': 'multipart-upload',
},
});
console.log(`✅ Multipart upload initialized: ${multipartUpload.UploadId}`);
// Clean up the test upload
await s3Client.abortMultipartUpload({
Bucket: bucketName,
Key: multipartKey,
UploadId: multipartUpload.UploadId,
});
console.log('✅ Multipart upload test finished and cleaned up');
} catch (error) {
console.log(`❌ Multipart upload test failed: ${error.message}`);
return false;
}
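// Note: the test above only initializes and then aborts the multipart upload.
// A full round trip (sketch; a single >= 5 MB part in `buffer` is assumed)
// would continue with:
//   const part = await s3Client.uploadPart({
//     Bucket: bucketName, Key: multipartKey,
//     UploadId: multipartUpload.UploadId, PartNumber: 1, Body: buffer,
//   });
//   await s3Client.completeMultipartUpload({
//     Bucket: bucketName, Key: multipartKey, UploadId: multipartUpload.UploadId,
//     MultipartUpload: { Parts: [{ ETag: part.ETag, PartNumber: 1 }] },
//   });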
// 7. List the files in the bucket
console.log('\n📂 Listing the bucket contents...');
try {
const listResult = await s3Client.listObjectsV2({
Bucket: bucketName,
MaxKeys: 10,
});
console.log(`✅ The bucket holds ${listResult.KeyCount || 0} file(s)`);
if (listResult.Contents && listResult.Contents.length > 0) {
console.log('Most recent files:');
listResult.Contents.slice(-5).forEach((obj, index) => {
const size = obj.Size < 1024 ? `${obj.Size}B` : `${Math.round(obj.Size / 1024)}KB`;
console.log(`  ${index + 1}. ${obj.Key} (${size})`);
});
}
} catch (error) {
console.log(`❌ Failed to list files: ${error.message}`);
}
// 8. Remove the test file
console.log('\n🧹 Cleaning up the test file...');
try {
await s3Client.deleteObject({
Bucket: bucketName,
Key: testFileName,
});
console.log('✅ Test file removed');
} catch (error) {
console.log(`⚠️ Failed to remove the test file: ${error.message}`);
}
console.log('\n🎉 All tests passed; your MinIO configuration is correct!');
console.log('\n📝 Configuration summary:');
console.log('- ✅ Connectivity OK');
console.log('- ✅ Credentials valid');
console.log('- ✅ Bucket available');
console.log('- ✅ File upload/download working');
console.log('- ✅ Multipart uploads supported');
console.log('\n💡 You can use these settings in the application:');
console.log('STORAGE_TYPE=s3');
console.log(`UPLOAD_DIR=${uploadDir}`);
console.log(`S3_ENDPOINT=${config.endpoint}`);
console.log(`S3_REGION=${config.region}`);
console.log(`S3_BUCKET=${bucketName}`);
console.log(`S3_ACCESS_KEY_ID=${config.credentials.accessKeyId}`);
console.log('S3_SECRET_ACCESS_KEY=***');
console.log('S3_FORCE_PATH_STYLE=true');
return true;
} catch (error) {
console.log(`❌ Unexpected error during testing: ${error.message}`);
console.log('Error stack:', error.stack);
return false;
}
}
// Entry point
async function main() {
console.log('🚀 MinIO S3 storage configuration test\n');
// Check dependencies
try {
require('@aws-sdk/client-s3');
} catch (error) {
console.log('❌ Missing required dependency @aws-sdk/client-s3');
console.log('Run: npm install @aws-sdk/client-s3');
process.exit(1);
}
const success = await testMinIOConfig();
if (success) {
console.log('\n✅ Test complete: the MinIO configuration is correct and ready to use');
process.exit(0);
} else {
console.log('\n❌ Test failed: review the errors above and fix the configuration');
process.exit(1);
}
}
main().catch((error) => {
console.error('❌ Script execution failed:', error);
process.exit(1);
});

View File

@ -1,36 +0,0 @@
{
"name": "@repo/tus",
"version": "1.0.0",
"private": true,
"exports": {
".": "./src/index.ts"
},
"scripts": {
"build": "tsup",
"dev": "tsup --watch",
"dev-static": "tsup --no-watch",
"clean": "rimraf dist",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.723.0",
"@shopify/semaphore": "^3.1.0",
"debug": "^4.4.0",
"lodash.throttle": "^4.1.1",
"multistream": "^4.1.0"
},
"devDependencies": {
"@types/debug": "^4.1.12",
"@types/lodash.throttle": "^4.1.9",
"@types/multistream": "^4.1.3",
"@types/node": "^20.3.1",
"concurrently": "^8.0.0",
"ioredis": "^5.4.1",
"rimraf": "^6.0.1",
"should": "^13.2.3",
"ts-node": "^10.9.1",
"tsup": "^8.3.5",
"typescript": "^5.5.4",
"@redis/client": "^1.6.0"
}
}

View File

@ -1,345 +0,0 @@
import EventEmitter from 'node:events';
import stream from 'node:stream/promises';
import { addAbortSignal, PassThrough } from 'node:stream';
import type http from 'node:http';
import type { ServerOptions } from '../types';
import throttle from 'lodash.throttle';
import { CancellationContext, DataStore, ERRORS, EVENTS, StreamLimiter, Upload } from '../utils';
/**
 * Extracts the file ID from the end of a URL path.
 * - `([^/]+)` captures the last path segment
 * - `\/?$` allows an optional trailing slash
 *
 * Examples:
 * - `/files/12345` matches `12345`
 * - `/files/12345/` matches `12345`
 */
const reExtractFileID = /([^/]+)\/?$/;
/**
 * Extracts the host from an HTTP `forwarded` header.
 * Matches `host="<value>"` or `host=<value>` and captures `<value>`:
 * - `host="?` matches `host=` with an optional opening quote
 * - `([^";]+)` captures everything up to a closing quote or semicolon
 *
 * Examples:
 * - `host="example.com"` captures `example.com`
 * - `host=example.com` captures `example.com`
 */
const reForwardedHost = /host="?([^";]+)/;
/**
 * Extracts the protocol (`http` or `https`) from an HTTP `forwarded` header.
 * Matches `proto=<value>` and captures `<value>`:
 * - `proto=` matches the literal prefix
 * - `(https?)` captures `http` or `https`
 *
 * Examples:
 * - `proto=https` captures `https`
 * - `proto=http` captures `http`
 */
const reForwardedProto = /proto=(https?)/;
/**
 * BaseHandler is the common base class for all TUS request handlers.
 * It extends the Node.js EventEmitter so handlers can emit upload lifecycle events.
 */
export class BaseHandler extends EventEmitter {
options: ServerOptions;
store: DataStore;
/**
 * Creates a BaseHandler.
 * @param store - the data store backing uploads
 * @param options - server options
 * @throws if no store is provided
 */
constructor(store: DataStore, options: ServerOptions) {
super();
if (!store) {
throw new Error('Store must be defined');
}
this.store = store;
this.options = options;
}
/**
 * Writes an HTTP response.
 * @param res - HTTP response object
 * @param status - HTTP status code
 * @param headers - response headers
 * @param body - response body
 * @returns the ended response
 */
write(res: http.ServerResponse, status: number, headers = {}, body = '') {
if (status !== 204) {
(headers as any)['Content-Length'] = Buffer.byteLength(body, 'utf8');
}
res.writeHead(status, headers);
res.write(body);
return res.end();
}
/**
 * Generates the URL for an upload.
 * @param req - HTTP request object
 * @param id - upload ID
 * @returns the upload URL, e.g. `proto://host/path/id`
 */
generateUrl(req: http.IncomingMessage, id: string) {
const path = this.options.path === '/' ? '' : this.options.path;
if (this.options.generateUrl) {
// Use the user-defined generateUrl function to build the URL
const { proto, host } = this.extractHostAndProto(req);
return this.options.generateUrl(req, {
proto,
host,
path: path,
id,
});
}
// Default implementation
if (this.options.relativeLocation) {
return `${path}/${id}`;
}
const { proto, host } = this.extractHostAndProto(req);
return `${proto}://${host}${path}/${id}`;
}
/**
 * Extracts the upload ID from the request URL.
 * @param req - HTTP request object
 * @returns the decoded ID, or undefined if it cannot be determined
 */
getFileIdFromRequest(req: http.IncomingMessage) {
const match = reExtractFileID.exec(req.url as string);
if (this.options.getFileIdFromRequest) {
const lastPath = match?.[1] ? decodeURIComponent(match[1]) : undefined;
return this.options.getFileIdFromRequest(req, lastPath);
}
if (!match?.[1] || this.options.path.includes(match[1])) {
return;
}
return decodeURIComponent(match[1]);
}
/**
 * Extracts the host and protocol from an HTTP request.
 * When the respectForwardedHeaders option is enabled, the `forwarded` and
 * `x-forwarded-*` headers are consulted first; otherwise the `host` header
 * is used and the protocol defaults to http.
 * For example, `Forwarded: proto=https;host="cdn.example.com"` yields
 * `{ proto: 'https', host: 'cdn.example.com' }`.
 * @param req - HTTP request object
 * @returns an object containing the host and protocol
 */
protected extractHostAndProto(req: http.IncomingMessage) {
let proto: string | undefined;
let host: string | undefined;
// If respecting forwarded headers is enabled
if (this.options.respectForwardedHeaders) {
// Read the forwarded field from the request headers
const forwarded = req.headers.forwarded as string | undefined;
if (forwarded) {
// Extract the host and protocol from the forwarded field using the regexes above
host ??= reForwardedHost.exec(forwarded)?.[1];
proto ??= reForwardedProto.exec(forwarded)?.[1];
}
// Read the x-forwarded-host and x-forwarded-proto fields
const forwardHost = req.headers['x-forwarded-host'];
const forwardProto = req.headers['x-forwarded-proto'];
// Check that x-forwarded-proto is a valid protocol (http or https)
// @ts-expect-error we can pass undefined
if (['http', 'https'].includes(forwardProto)) {
proto ??= forwardProto as string;
}
// If x-forwarded-host exists, use it as the host
host ??= forwardHost as string;
}
// Fall back to the host header if no forwarded host was found
host ??= req.headers.host;
// Default to http if no forwarded protocol was found
proto ??= 'http';
// Return the host and protocol
return { host: host as string, proto };
}
/**
 * Resolves the configured locker.
 * @param req - HTTP request object
 * @returns the locker instance
 */
protected async getLocker(req: http.IncomingMessage) {
if (typeof this.options.locker === 'function') {
return this.options.locker(req);
}
return this.options.locker;
}
/**
 * Acquires a lock for an upload.
 * @param req - HTTP request object
 * @param id - upload ID
 * @param context - cancellation context
 * @returns the acquired lock
 */
protected async acquireLock(req: http.IncomingMessage, id: string, context: CancellationContext) {
const locker = await this.getLocker(req);
const lock = locker.newLock(id);
await lock.lock(() => {
context.cancel();
});
return lock;
}
/**
 * Streams the request body into the data store, handling cancellation,
 * progress events, and size limiting.
 * @param req - HTTP request object
 * @param upload - the upload being written, including its current offset
 * @param maxFileSize - the maximum number of bytes that may be written
 * @param context - cancellation context
 * @returns a Promise resolving to the new offset
 */
protected writeToStore(req: http.IncomingMessage, upload: Upload, maxFileSize: number, context: CancellationContext) {
// Wrap the async work in a Promise so cancellation and errors are handled in one place.
// biome-ignore lint/suspicious/noAsyncPromiseExecutor: <explanation>
return new Promise<number>(async (resolve, reject) => {
// If the request was already cancelled, reject immediately.
if (context.signal.aborted) {
reject(ERRORS.ABORTED);
return;
}
// Create a PassThrough stream as a proxy to manage the request stream.
// A PassThrough stream is transparent: data flows through unmodified.
// The proxy lets the write be aborted without touching the original request stream.
const proxy = new PassThrough();
// Tie the cancellation signal to the proxy so the stream aborts automatically on cancel.
addAbortSignal(context.signal, proxy);
// Handle errors surfacing on the proxy stream.
proxy.on('error', (err) => {
// Unpipe the request stream from the proxy.
req.unpipe(proxy);
// Map AbortError to ABORTED; propagate anything else as-is.
reject(err.name === 'AbortError' ? ERRORS.ABORTED : err);
});
// Throttle POST_RECEIVE_V2 so progress notifications do not fire often
// enough to hurt performance.
const postReceive = throttle(
(offset: number) => {
// Emit POST_RECEIVE_V2 with the current upload offset.
this.emit(EVENTS.POST_RECEIVE_V2, req, { ...upload, offset });
},
// The throttle interval keeps events from firing too frequently.
this.options.postReceiveInterval,
{ leading: false },
);
// Temporary counter tracking the offset as chunks are written.
let tempOffset = upload.offset;
// Update the offset and emit progress for every chunk flowing through the proxy.
proxy.on('data', (chunk: Buffer) => {
tempOffset += chunk.byteLength;
postReceive(tempOffset);
});
// Handle errors on the request stream itself.
req.on('error', () => {
// If the proxy is still open, end it gracefully so the bytes received so far
// can be stored as an incomplete part.
if (!proxy.closed) {
proxy.end();
}
});
// Pipe the request through the proxy and a StreamLimiter into the store.
// The StreamLimiter caps the number of bytes written so the upload cannot
// exceed the maximum file size.
stream
.pipeline(
// Pipe the request stream through the proxy.
req.pipe(proxy),
// Cap the amount of data written.
new StreamLimiter(maxFileSize),
// Write the stream into the data store.
async (stream) => {
return this.store.write(stream as StreamLimiter, upload.id, upload.offset);
},
)
// On success, resolve with the number of bytes written.
.then(resolve)
// On failure, reject with the error.
.catch(reject);
});
}
/**
 * Resolves the configured maximum upload size.
 * @param req - HTTP request object
 * @param id - upload ID, if known
 * @returns the maximum size in bytes (0 means unlimited)
 */
getConfiguredMaxSize(req: http.IncomingMessage, id: string | null) {
if (typeof this.options.maxSize === 'function') {
return this.options.maxSize(req, id);
}
return this.options.maxSize ?? 0;
}
/**
 * Calculates the maximum request body size allowed for this request,
 * based on the Content-Length header, the current offset, and the
 * configured limits. For example, with file.size = 100, offset = 40,
 * and Content-Length = 70, the upload would overrun and is rejected.
 * @param req - HTTP request object
 * @param file - the upload being written
 * @param configuredMaxSize - optional pre-resolved maximum size
 * @returns the number of bytes that may still be written
 * @throws ERRORS.ERR_SIZE_EXCEEDED if the upload would exceed the allowed size
 */
async calculateMaxBodySize(req: http.IncomingMessage, file: Upload, configuredMaxSize?: number) {
// Use the server-configured maximum size unless one was passed in explicitly.
configuredMaxSize ??= await this.getConfiguredMaxSize(req, file.id);
// Parse the Content-Length header from the request (defaults to 0 when unset).
const length = Number.parseInt(req.headers['content-length'] || '0', 10);
const offset = file.offset;
const hasContentLengthSet = req.headers['content-length'] !== undefined;
const hasConfiguredMaxSizeSet = configuredMaxSize > 0;
if (file.sizeIsDeferred) {
// For uploads with a deferred size, check the configured maximum
// unless this is a chunked transfer.
if (hasContentLengthSet && hasConfiguredMaxSizeSet && offset + length > configuredMaxSize) {
throw ERRORS.ERR_SIZE_EXCEEDED;
}
if (hasConfiguredMaxSizeSet) {
return configuredMaxSize - offset;
}
return Number.MAX_SAFE_INTEGER;
}
// When the size is known, check that the upload fits in the file.
if (offset + length > (file.size || 0)) {
throw ERRORS.ERR_SIZE_EXCEEDED;
}
if (hasContentLengthSet) {
return length;
}
return (file.size || 0) - offset;
}
}

View File

@ -1,64 +0,0 @@
import { CancellationContext, ERRORS, EVENTS } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
export class DeleteHandler extends BaseHandler {
/**
 * Core method handling DELETE requests.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param context cancellation context
 * @returns the HTTP response
 *
 * Behavior:
 * - deletes the resource addressed by the HTTP DELETE request
 * - uses a lock to guarantee concurrency safety
 * - can refuse to terminate uploads that have already finished
 *
 * Notes:
 * - the lock is always released, even on failure
 * - a POST_TERMINATE event is emitted after deletion
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
// Extract the file ID from the request
const id = this.getFileIdFromRequest(req)
// Throw if no file ID is present
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if any
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Acquire the file lock to guarantee concurrency safety
const lock = await this.acquireLock(req, id, context)
try {
// Optionally refuse to delete uploads that have already finished
if (this.options.disableTerminationForFinishedUploads) {
const upload = await this.store.getUpload(id)
// Throw if the upload is already complete
if (upload.offset === upload.size) {
throw ERRORS.INVALID_TERMINATION
}
}
// Remove the file from the store
await this.store.remove(id)
} finally {
// Always release the lock, whether or not deletion succeeded
await lock.unlock()
}
// Respond with 204 No Content
const writtenRes = this.write(res, 204, {})
// Emit the termination event
this.emit(EVENTS.POST_TERMINATE, req, writtenRes, id)
return writtenRes
}
}
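For reference, a client terminates an upload by sending a DELETE request that carries the protocol version header; a minimal sketch, with the host, port, and upload ID assumed:

await fetch('http://localhost:1080/files/<upload-id>', {
method: 'DELETE',
headers: { 'Tus-Resumable': '1.0.0' },
})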

View File

@ -1,189 +0,0 @@
/**
 * GetHandler.ts
 * Handles HTTP GET requests, serving completed uploads and any custom
 * routes registered with the web service.
 */
import stream from 'node:stream'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
import type { RouteHandler } from '../types'
import { ERRORS, Upload } from '../utils'
/**
 * The GetHandler class serves GET requests.
 *
 * Usage:
 * const handler = new GetHandler(store, options)
 * handler.registerPath('/custom', customHandler)
 */
export class GetHandler extends BaseHandler {
// Map of paths to handler functions, giving O(1) lookups
paths: Map<string, RouteHandler> = new Map()
/**
 * Validates that a MIME type conforms to RFC 1341,
 * e.g. text/plain; charset=utf-8.
 * Matching is O(n) in the length of the string.
 */
reMimeType =
// biome-ignore lint/suspicious/noControlCharactersInRegex: it's fine
/^(?:application|audio|example|font|haptics|image|message|model|multipart|text|video|x-(?:[0-9A-Za-z!#$%&'*+.^_`|~-]+))\/([0-9A-Za-z!#$%&'*+.^_`|~-]+)((?:[ ]*;[ ]*[0-9A-Za-z!#$%&'*+.^_`|~-]+=(?:[0-9A-Za-z!#$%&'*+.^_`|~-]+|"(?:[^"\\]|\.)*"))*)$/
/**
 * Whitelist of MIME types that browsers may render inline.
 * A Set gives O(1) membership checks.
 */
mimeInlineBrowserWhitelist = new Set([
'text/plain',
'image/png',
'image/jpeg',
'image/gif',
'image/bmp',
'image/webp',
'audio/wave',
'audio/wav',
'audio/x-wav',
'audio/x-pn-wav',
'audio/webm',
'audio/ogg',
'video/mp4',
'video/webm',
'video/ogg',
'application/ogg',
])
/**
 * Registers a custom route handler.
 *
 * Parameters:
 * - path: the request path
 * - handler: the handler function
 * Registration is O(1).
 */
registerPath(path: string, handler: RouteHandler): void {
this.paths.set(path, handler)
}
/**
 * Serves a GET request.
 *
 * Parameters:
 * - req: HTTP request object
 * - res: HTTP response object
 * Returns a writable stream or void.
 * Throws FILE_NOT_FOUND if the upload does not exist or is incomplete.
 * Streaming is O(n) in the file size.
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse
// biome-ignore lint/suspicious/noConfusingVoidType: it's fine
): Promise<stream.Writable | void> {
// Check for a registered custom path handler first
if (this.paths.has(req.url as string)) {
const handler = this.paths.get(req.url as string) as RouteHandler
return handler(req, res)
}
// Check that the data store supports reads
if (!('read' in this.store)) {
throw ERRORS.FILE_NOT_FOUND
}
// Extract the file ID from the request
const id = this.getFileIdFromRequest(req)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if any
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Fetch the upload state
const stats = await this.store.getUpload(id)
// Only serve complete uploads
if (!stats || stats.offset !== stats.size) {
throw ERRORS.FILE_NOT_FOUND
}
// Work out the Content-Type and Content-Disposition headers
const { contentType, contentDisposition } = this.filterContentType(stats)
// Open a read stream for the file
// @ts-expect-error exists if supported
const file_stream = await this.store.read(id)
const headers = {
'Content-Length': stats.offset,
'Content-Type': contentType,
'Content-Disposition': contentDisposition,
}
res.writeHead(200, headers)
// Pipe the file to the response
return stream.pipeline(file_stream, res, () => {
// Ignore stream errors
})
}
/**
 * Determines the Content-Type and Content-Disposition headers for an upload.
 *
 * Parameters:
 * - stats: the upload state object
 * Returns an object with contentType and contentDisposition.
 * Runs in O(1); MIME validation follows the rules above.
 */
filterContentType(stats: Upload): {
contentType: string
contentDisposition: string
} {
let contentType: string
let contentDisposition: string
// Pull the file type and name out of the metadata
const { filetype, filename } = stats.metadata ?? {}
// Validate the file type format
if (filetype && this.reMimeType.test(filetype)) {
contentType = filetype
// Render inline only for whitelisted types
if (this.mimeInlineBrowserWhitelist.has(filetype)) {
contentDisposition = 'inline'
} else {
contentDisposition = 'attachment'
}
} else {
// Fall back to a generic type and force download
contentType = 'application/octet-stream'
contentDisposition = 'attachment'
}
// Append the filename to the Content-Disposition header
if (filename) {
contentDisposition += `; filename=${this.quote(filename)}`
}
return {
contentType,
contentDisposition,
}
}
/**
 * Quotes and escapes a string for use in a header value.
 *
 * Parameters:
 * - value: the string to escape
 *
 * Runs in O(n) in the length of the string.
 */
quote(value: string) {
return `"${value.replace(/"/g, '\\"')}"`
}
}
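A custom route can be attached to the GET handler before it is wired into a server; a minimal sketch, with the store and options assumed to be in scope:

const get = new GetHandler(store, options)
get.registerPath('/healthz', (req, res) => {
res.writeHead(200, { 'Content-Type': 'text/plain' })
res.end('ok')
})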

View File

@ -1,90 +0,0 @@
/**
 * HeadHandler
 * Handles HEAD requests from the TUS protocol, reporting the current state
 * of an upload so a client can resume where it left off.
 */
import { CancellationContext, ERRORS, Upload, Metadata } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
/**
 * The HeadHandler class serves TUS HEAD requests.
 * Extends BaseHandler.
 * Usage:
 * const handler = new HeadHandler(store, options)
 * await handler.send(req, res, context)
 */
export class HeadHandler extends BaseHandler {
/**
 * Core method handling HEAD requests.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param context cancellation context
 * @returns the HTTP response
 * @throws ERRORS.FILE_NOT_FOUND when no upload ID is present
 * @throws ERRORS.FILE_NO_LONGER_EXISTS when the upload has expired
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
// Extract the file ID from the request
const id = this.getFileIdFromRequest(req)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if any
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Acquire the file lock to guard against concurrent operations
const lock = await this.acquireLock(req, id, context)
let file: Upload
try {
// Fetch the upload info from the store
file = await this.store.getUpload(id)
} finally {
// Always release the lock
await lock.unlock()
}
// Check whether the file has expired
const now = new Date()
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
file.creation_date &&
now > new Date(new Date(file.creation_date).getTime() + this.store.getExpiration())
) {
throw ERRORS.FILE_NO_LONGER_EXISTS
}
// Prevent caching of the response
res.setHeader('Cache-Control', 'no-store')
// Report the current upload offset
res.setHeader('Upload-Offset', file.offset)
// Report the file size
if (file.sizeIsDeferred) {
// Size still unknown: signal deferred length
res.setHeader('Upload-Defer-Length', '1')
} else {
// Size known: report it
res.setHeader('Upload-Length', file.size as number)
}
// Report the metadata, if any
if (file.metadata !== undefined) {
// Serialize the metadata into the header format
res.setHeader('Upload-Metadata', Metadata.stringify(file.metadata) as string)
}
// End the response
return res.end()
}
}

View File

@ -1,61 +0,0 @@
/**
 * OptionsHandler
 * Handles TUS OPTIONS requests, advertising the protocol version,
 * supported extensions, size limits, and CORS configuration.
 */
import { ALLOWED_METHODS, HEADERS, MAX_AGE } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
/**
 * The OptionsHandler class serves TUS OPTIONS requests.
 * Extends BaseHandler.
 * Usage:
 * const handler = new OptionsHandler(store, options)
 * handler.send(req, res)
 */
export class OptionsHandler extends BaseHandler {
/**
 * Handles an OPTIONS request and sends the response.
 * @param req - HTTP request object
 * @param res - HTTP response object
 * @returns Promise<void>
 *
 * Steps:
 * 1. resolve the configured maximum file size
 * 2. advertise the TUS protocol version and extensions
 * 3. set the CORS headers
 * 4. respond with 204 No Content
 * Errors propagate to BaseHandler's error handling.
 */
async send(req: http.IncomingMessage, res: http.ServerResponse) {
// Resolve the configured maximum file size
const maxSize = await this.getConfiguredMaxSize(req, null)
// Advertise the TUS protocol version (fixed at 1.0.0)
res.setHeader('Tus-Version', '1.0.0')
// Advertise supported extensions, if the store has any
if (this.store.extensions.length > 0) {
res.setHeader('Tus-Extension', this.store.extensions.join(','))
}
// Advertise the maximum file size, if configured
if (maxSize) {
res.setHeader('Tus-Max-Size', maxSize)
}
// Merge the default and custom allowed headers
const allowedHeaders = [...HEADERS, ...(this.options.allowedHeaders ?? [])]
// Set the CORS headers
res.setHeader('Access-Control-Allow-Methods', ALLOWED_METHODS)
res.setHeader('Access-Control-Allow-Headers', allowedHeaders.join(', '))
res.setHeader('Access-Control-Max-Age', MAX_AGE)
// Respond with 204 No Content: the request succeeded but carries no body
return this.write(res, 204)
}
}
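For reference, an OPTIONS probe against a store with the usual extensions answers roughly like this (illustrative values; the extension list, size limit, and allowed methods depend on the configured store and options):

HTTP/1.1 204 No Content
Tus-Resumable: 1.0.0
Tus-Version: 1.0.0
Tus-Extension: creation,creation-with-upload,creation-defer-length,termination,expiration
Tus-Max-Size: 1073741824
Access-Control-Allow-Methods: GET, HEAD, PATCH, POST, DELETE, OPTIONS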

View File

@ -1,256 +0,0 @@
/**
 * PATCH request handler module
 *
 * Implements the PATCH request from the TUS protocol, which appends
 * received bytes to an existing upload at a given offset.
 *
 * Responsibilities:
 * - validate the request headers and upload state
 * - stream the body into the data store
 * - report the new offset and emit lifecycle events
 */
import debug from 'debug'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
import { CancellationContext, ERRORS, Upload, EVENTS } from '../utils'
const log = debug('tus-node-server:handlers:patch')
/**
 * PATCH request handler class
 *
 * Extends BaseHandler to implement the TUS PATCH request.
 *
 * Notes:
 * - locking guarantees concurrency safety
 * - async/await is used throughout for asynchronous work
 * - EVENTS are emitted at the relevant lifecycle points
 *
 * Usage:
 * const handler = new PatchHandler(store, options)
 * handler.send(req, res, context)
 */
export class PatchHandler extends BaseHandler {
/**
 * Core method handling PATCH requests.
 *
 * Steps:
 * 1. extract and validate the upload ID and headers
 * 2. acquire the upload lock
 * 3. validate the offset and, if requested, declare the upload length
 * 4. write the request body to the store
 * 5. respond with the new offset and emit events
 *
 * @param req HTTP request object
 * @param res HTTP response object
 * @param context cancellation context
 * @returns the HTTP response
 *
 * Errors:
 * - ERRORS.FILE_NOT_FOUND
 * - ERRORS.MISSING_OFFSET
 * - ERRORS.INVALID_CONTENT_TYPE
 * - ERRORS.FILE_NO_LONGER_EXISTS
 * - ERRORS.INVALID_OFFSET
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
try {
// Extract the file ID from the request
const id = this.getFileIdFromRequest(req)
// console.log('id', id)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// The Upload-Offset header is required
if (req.headers['upload-offset'] === undefined) {
throw ERRORS.MISSING_OFFSET
}
// Parse the offset
const offset = Number.parseInt(req.headers['upload-offset'] as string, 10)
// The Content-Type header is required
const content_type = req.headers['content-type']
if (content_type === undefined) {
throw ERRORS.INVALID_CONTENT_TYPE
}
// Run the incoming-request hook
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Resolve the configured maximum file size
const maxFileSize = await this.getConfiguredMaxSize(req, id)
// Acquire the file lock
const lock = await this.acquireLock(req, id, context)
let upload: Upload
let newOffset: number
try {
// Fetch the upload info from the store
upload = await this.store.getUpload(id)
// Check whether the file has expired
const now = Date.now()
const creation = upload.creation_date
? new Date(upload.creation_date).getTime()
: now
const expiration = creation + this.store.getExpiration()
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
now > expiration
) {
throw ERRORS.FILE_NO_LONGER_EXISTS
}
// The client's offset must match the stored offset
if (upload.offset !== offset) {
log(
`[PatchHandler] send: Incorrect offset - ${offset} sent but file is ${upload.offset}`
)
throw ERRORS.INVALID_OFFSET
}
// Handle the Upload-Length header, if present
const upload_length = req.headers['upload-length'] as string | undefined
if (upload_length !== undefined) {
const size = Number.parseInt(upload_length, 10)
// The store must support deferred length declaration
if (!this.store.hasExtension('creation-defer-length')) {
throw ERRORS.UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION
}
// The length may only be declared once
if (upload.size !== undefined) {
throw ERRORS.INVALID_LENGTH
}
// The declared length must cover what has already been written
if (size < upload.offset) {
throw ERRORS.INVALID_LENGTH
}
// Enforce the maximum file size
if (maxFileSize > 0 && size > maxFileSize) {
throw ERRORS.ERR_MAX_SIZE_EXCEEDED
}
// Declare the upload length
await this.store.declareUploadLength(id, size)
upload.size = size
}
// Work out the maximum request body size
const maxBodySize = await this.calculateMaxBodySize(req, upload, maxFileSize)
// Write the data to the store
newOffset = await this.writeToStore(req, upload, maxBodySize, context)
} finally {
// Release the file lock
await lock.unlock()
}
// Update the upload offset
upload.offset = newOffset
// Emit the data-received event
this.emit(EVENTS.POST_RECEIVE, req, res, upload)
// Build the response data
const responseData = {
status: 204,
headers: {
'Upload-Offset': newOffset,
} as Record<string, string | number>,
body: '',
}
// Handle upload completion
if (newOffset === upload.size && this.options.onUploadFinish) {
try {
// Invoke the (possibly async) upload-finish callback so users can
// customize what happens when an upload completes
const resOrObject = await this.options.onUploadFinish(req, res, upload)
// Backwards compatibility: two return types are supported
// 1. a plain http.ServerResponse
// 2. an object carrying custom response information
if (
// Check whether this is a standard ServerResponse
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
// Use the returned server response directly
res = resOrObject as http.ServerResponse
} else {
// Type helper for the custom response object:
// exclude ServerResponse to keep the cast type-safe
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
// Narrow the returned object to the custom response shape
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
// Adopt the returned response object
res = obj.res
// Optionally override the status code
if (obj.status_code) responseData.status = obj.status_code
// Optionally override the body
if (obj.body) responseData.body = obj.body
// Merge headers, letting the defaults win over custom ones
if (obj.headers)
responseData.headers = Object.assign(obj.headers, responseData.headers)
}
} catch (error: any) {
// Log errors thrown from the upload-finish callback and rethrow
log(`onUploadFinish: ${error.body}`)
throw error
}
}
// Handle the expiration header for still-incomplete uploads
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
upload.creation_date &&
(upload.size === undefined || newOffset < upload.size)
) {
const creation = new Date(upload.creation_date)
const dateString = new Date(
creation.getTime() + this.store.getExpiration()
).toUTCString()
responseData.headers['Upload-Expires'] = dateString
}
// Send the response
const writtenRes = this.write(
res,
responseData.status,
responseData.headers,
responseData.body
)
// Emit the upload-finished event
if (newOffset === upload.size) {
this.emit(EVENTS.POST_FINISH, req, writtenRes, upload)
}
return writtenRes
} catch (e) {
// Cancel the in-flight operation
context.abort()
throw e
}
}
}
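For reference, an onUploadFinish hook exercising the second return shape could look like this; a minimal sketch, with the surrounding ServerOptions wiring assumed:

const options: ServerOptions = {
path: '/files',
onUploadFinish: async (req, res, upload) => ({
res,
status_code: 200,
headers: { 'X-Upload-Id': String(upload.id) },
body: JSON.stringify({ id: upload.id, size: upload.size }),
}),
}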

View File

@ -1,257 +0,0 @@
import debug from 'debug'
import { BaseHandler } from './BaseHandler'
import { validateHeader } from '../validators/HeaderValidator'
import type http from 'node:http'
import type { ServerOptions, WithRequired } from '../types'
import { DataStore, Uid, CancellationContext, ERRORS, Metadata, Upload, EVENTS } from '../utils'
const log = debug('tus-node-server:handlers:post')
/**
 * PostHandler handles HTTP POST requests that create new uploads in the DataStore.
 * Extends BaseHandler.
 */
export class PostHandler extends BaseHandler {
// Override the options type from BaseHandler: the constructor guarantees namingFunction is set
declare options: WithRequired<ServerOptions, 'namingFunction'>
/**
 * Creates a PostHandler.
 * @param store - the DataStore
 * @param options - server options; namingFunction defaults to Uid.rand
 * @throws if namingFunction is provided but is not a function
 */
constructor(store: DataStore, options: ServerOptions) {
if (options.namingFunction && typeof options.namingFunction !== 'function') {
throw new Error("'namingFunction' must be a function")
}
if (!options.namingFunction) {
options.namingFunction = Uid.rand
}
super(store, options)
}
/**
 * Creates a new upload in the DataStore.
 * @param req - HTTP request
 * @param res - HTTP response
 * @param context - cancellation context
 * @returns the HTTP response
 * @throws if 'upload-concat' is set but the DataStore lacks the 'concatentation' extension
 * @throws if neither or both of 'upload-length' and 'upload-defer-length' are set
 * @throws if 'upload-metadata' cannot be parsed
 * @throws on any namingFunction failure
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
if ('upload-concat' in req.headers && !this.store.hasExtension('concatentation')) {
throw ERRORS.UNSUPPORTED_CONCATENATION_EXTENSION
}
const upload_length = req.headers['upload-length'] as string | undefined
const upload_defer_length = req.headers['upload-defer-length'] as string | undefined
const upload_metadata = req.headers['upload-metadata'] as string | undefined
if (
upload_defer_length !== undefined && // Throw if the extension is not supported
!this.store.hasExtension('creation-defer-length')
) {
throw ERRORS.UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION
}
if ((upload_length === undefined) === (upload_defer_length === undefined)) {
throw ERRORS.INVALID_LENGTH
}
let metadata: ReturnType<(typeof Metadata)['parse']> | undefined
if ('upload-metadata' in req.headers) {
try {
metadata = Metadata.parse(upload_metadata)
} catch {
throw ERRORS.INVALID_METADATA
}
}
let id: string
try {
id = await this.options.namingFunction(req, metadata)
} catch (error) {
log('create: check your `namingFunction`. Error', error)
throw error
}
const maxFileSize = await this.getConfiguredMaxSize(req, id)
if (
upload_length &&
maxFileSize > 0 &&
Number.parseInt(upload_length, 10) > maxFileSize
) {
throw ERRORS.ERR_MAX_SIZE_EXCEEDED
}
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
const upload = new Upload({
id,
size: upload_length ? Number.parseInt(upload_length, 10) : undefined,
offset: 0,
metadata,
})
if (this.options.onUploadCreate) {
try {
const resOrObject = await this.options.onUploadCreate(req, res, upload)
// Backwards compatible; will be removed in the next major release.
// `instanceof` cannot be used because instances are mocked in tests.
if (
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
res = resOrObject as http.ServerResponse
} else {
// The type definition is ugly because TS only understands instanceof
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
res = obj.res
if (obj.metadata) {
upload.metadata = obj.metadata
}
}
} catch (error: any) {
log(`onUploadCreate error: ${error.body}`)
throw error
}
}
const lock = await this.acquireLock(req, id, context)
let isFinal: boolean
let url: string
// Recommended response defaults
const responseData = {
status: 201,
headers: {} as Record<string, string | number>,
body: '',
}
try {
await this.store.create(upload)
url = this.generateUrl(req, upload.id)
this.emit(EVENTS.POST_CREATE, req, res, upload, url)
isFinal = upload.size === 0 && !upload.sizeIsDeferred
// If the request carries a Content-Type header, the creation-with-upload extension is in use
if (validateHeader('content-type', req.headers['content-type'])) {
const bodyMaxSize = await this.calculateMaxBodySize(req, upload, maxFileSize)
const newOffset = await this.writeToStore(req, upload, bodyMaxSize, context)
responseData.headers['Upload-Offset'] = newOffset.toString()
isFinal = newOffset === Number.parseInt(upload_length as string, 10)
upload.offset = newOffset
}
} catch (e) {
context.abort()
throw e
} finally {
await lock.unlock()
}
// Post-completion handling
if (isFinal && this.options.onUploadFinish) {
try {
// Invoke the custom upload-finish callback with the request, response, and upload,
// letting users customize what happens when an upload completes
const resOrObject = await this.options.onUploadFinish(req, res, upload)
// Backwards compatibility: detect a plain HTTP response object
// by checking for write and writeHead methods
if (
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
// A response object was returned; use it directly
res = resOrObject as http.ServerResponse
} else {
// Otherwise a custom result object was returned;
// exclude ServerResponse from its type for a safe cast
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
// Narrow the returned object to the custom response shape
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
// Adopt the returned response object
res = obj.res
// Optionally override the status code
if (obj.status_code) responseData.status = obj.status_code
// Optionally override the body
if (obj.body) responseData.body = obj.body
// Merge headers, letting the defaults win over custom ones
if (obj.headers)
responseData.headers = Object.assign(obj.headers, responseData.headers)
}
} catch (error: any) {
// Log errors thrown from the upload-finish callback
log(`onUploadFinish: ${error.body}`)
// Rethrow to abort the upload flow
throw error
}
}
// The Upload-Expires response header indicates when an incomplete upload expires.
// If the expiration is known at creation time, it must be included in the response
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
upload.creation_date
) {
const created = await this.store.getUpload(upload.id)
if (created.offset !== Number.parseInt(upload_length as string, 10)) {
const creation = new Date(upload.creation_date)
// The value must be in RFC 7231 date-time format
responseData.headers['Upload-Expires'] = new Date(
creation.getTime() + this.store.getExpiration()
).toUTCString()
}
}
// Only attach the Location header when the final HTTP status is 201 or 3xx
if (
responseData.status === 201 ||
(responseData.status >= 300 && responseData.status < 400)
) {
responseData.headers.Location = url
}
const writtenRes = this.write(
res,
responseData.status,
responseData.headers,
responseData.body
)
if (isFinal) {
this.emit(EVENTS.POST_FINISH, req, writtenRes, upload)
}
return writtenRes
}
}
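When the default random IDs from Uid.rand are not wanted, a custom namingFunction can be supplied through the server options; a minimal sketch, with the rest of the options assumed:

import crypto from 'node:crypto'
const namingFunction = () => crypto.randomBytes(16).toString('hex')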

View File

@ -1,5 +0,0 @@
export { Server } from './server'
export * from './types'
export * from './lockers'
export * from './utils'
export * from './store'

View File

@ -1,145 +0,0 @@
/**
 * MemoryLocker is an in-memory implementation of the Locker interface,
 * serializing access to uploads within a single process.
 *
 * Design:
 * - uses a Map for O(1) access to the currently held locks
 * - a new `lock` request asks the current holder to release via its
 *   `cancelReq` callback, then retries until the lock frees up
 * - acquisition is bounded by a configurable timeout
 *
 * Notes:
 * - `lock` throws ERR_LOCK_TIMEOUT when the timeout elapses
 * - `unlock` throws when releasing a lock that is not currently held
 */
import { RequestRelease, Locker, ERRORS, Lock } from "../utils"
export interface MemoryLockerOptions {
acquireLockTimeout: number
}
interface LockEntry {
requestRelease: RequestRelease
}
export class MemoryLocker implements Locker {
timeout: number
locks = new Map<string, LockEntry>()
constructor(options?: MemoryLockerOptions) {
this.timeout = options?.acquireLockTimeout ?? 1000 * 30
}
/**
 * Creates a MemoryLock for the given upload ID.
 * @param id the upload ID
 * @returns a MemoryLock instance
 */
newLock(id: string) {
return new MemoryLock(id, this, this.timeout)
}
}
class MemoryLock implements Lock {
constructor(
private id: string,
private locker: MemoryLocker,
private timeout: number = 1000 * 30
) { }
/**
 * Acquires the lock, racing acquisition against the timeout.
 * @param requestRelease callback invoked when another request wants this lock
 * @throws ERRORS.ERR_LOCK_TIMEOUT when the lock cannot be acquired in time
 */
async lock(requestRelease: RequestRelease): Promise<void> {
const abortController = new AbortController()
const lock = await Promise.race([
this.waitTimeout(abortController.signal),
this.acquireLock(this.id, requestRelease, abortController.signal),
])
abortController.abort()
if (!lock) {
throw ERRORS.ERR_LOCK_TIMEOUT
}
}
/**
 * Tries to acquire the lock for the given ID, retrying until aborted.
 * @param id the upload ID
 * @param requestRelease release callback stored with the lock
 * @param signal AbortSignal bounding the attempts
 * @returns true when acquired, false when aborted
 */
protected async acquireLock(
id: string,
requestRelease: RequestRelease,
signal: AbortSignal
): Promise<boolean> {
if (signal.aborted) {
return false
}
const lock = this.locker.locks.get(id)
if (!lock) {
const lock = {
requestRelease,
}
this.locker.locks.set(id, lock)
return true
}
await lock.requestRelease?.()
return await new Promise((resolve, reject) => {
// Why setImmediate:
// 1. it defers the recursive call to the next event-loop iteration, preventing stack overflow;
// 2. it lets the event loop service other pending events, keeping the server responsive;
// 3. it gives other requests a chance to grab the lock, keeping acquisition fair.
setImmediate(() => {
this.acquireLock(id, requestRelease, signal).then(resolve).catch(reject)
})
})
}
/**
 * Releases the lock.
 * @throws when the lock is not currently held
 */
async unlock(): Promise<void> {
const lock = this.locker.locks.get(this.id)
if (!lock) {
throw new Error('Releasing an unlocked lock!')
}
this.locker.locks.delete(this.id)
}
/**
 * Waits for the timeout to elapse (or the signal to abort).
 * @param signal AbortSignal that cancels the wait
 * @returns a Promise resolving to false
 */
protected waitTimeout(signal: AbortSignal) {
return new Promise<boolean>((resolve) => {
const timeout = setTimeout(() => {
resolve(false)
}, this.timeout)
const abortListener = () => {
clearTimeout(timeout)
signal.removeEventListener('abort', abortListener)
resolve(false)
}
signal.addEventListener('abort', abortListener)
})
}
}
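End to end, the locker is used roughly like this; a minimal sketch with the critical section left abstract:

const locker = new MemoryLocker({ acquireLockTimeout: 10_000 })
const lock = locker.newLock('upload-123')
await lock.lock(() => {
// invoked when another request wants this lock: stop work so it can be released
})
try {
// ... exclusive work on upload-123 ...
} finally {
await lock.unlock()
}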

View File

@ -1 +0,0 @@
export * from './MemoryLocker'

View File

@ -1,433 +0,0 @@
import http from 'node:http';
import { EventEmitter } from 'node:events';
import debug from 'debug';
import { GetHandler } from './handlers/GetHandler';
import { HeadHandler } from './handlers/HeadHandler';
import { OptionsHandler } from './handlers/OptionsHandler';
import { PatchHandler } from './handlers/PatchHandler';
import { PostHandler } from './handlers/PostHandler';
import { DeleteHandler } from './handlers/DeleteHandler';
import { validateHeader } from './validators/HeaderValidator';
import type stream from 'node:stream';
import type { ServerOptions, RouteHandler, WithOptional } from './types';
import { MemoryLocker } from './lockers';
import {
EVENTS,
Upload,
DataStore,
REQUEST_METHODS,
ERRORS,
TUS_RESUMABLE,
EXPOSED_HEADERS,
CancellationContext,
} from './utils';
/**
 * Handler map
 * One handler instance per HTTP method supported by the TUS server.
 */
type Handlers = {
GET: InstanceType<typeof GetHandler>; // GET request handler
HEAD: InstanceType<typeof HeadHandler>; // HEAD request handler
OPTIONS: InstanceType<typeof OptionsHandler>; // OPTIONS request handler
PATCH: InstanceType<typeof PatchHandler>; // PATCH request handler
POST: InstanceType<typeof PostHandler>; // POST request handler
DELETE: InstanceType<typeof DeleteHandler>; // DELETE request handler
};
/**
 * TUS server event interface
 * Describes the listener signature for each upload lifecycle event.
 */
interface TusEvents {
/**
 * Fired when an upload has been created.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param upload the upload
 * @param url the upload URL
 */
[EVENTS.POST_CREATE]: (req: http.IncomingMessage, res: http.ServerResponse, upload: Upload, url: string) => void;
/**
 * @deprecated
 * Use POST_RECEIVE_V2 instead.
 */
[EVENTS.POST_RECEIVE]: (req: http.IncomingMessage, res: http.ServerResponse, upload: Upload) => void;
/**
 * V2 progress event.
 * @param req HTTP request object
 * @param upload the upload with its current offset
 */
[EVENTS.POST_RECEIVE_V2]: (req: http.IncomingMessage, upload: Upload) => void;
/**
 * Fired when an upload has finished.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param upload the upload
 */
[EVENTS.POST_FINISH]: (req: http.IncomingMessage, res: http.ServerResponse, upload: Upload) => void;
/**
 * Fired when an upload has been terminated.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param id the upload ID
 */
[EVENTS.POST_TERMINATE]: (req: http.IncomingMessage, res: http.ServerResponse, id: string) => void;
}
/**
 * Aliases for the EventEmitter handler types
 */
type on = EventEmitter['on'];
type emit = EventEmitter['emit'];
/**
 * TUS server interface declaration
 * Extends EventEmitter with typed on/emit overloads for TUS events.
 */
export declare interface Server {
/**
 * Registers a typed TUS event listener.
 * @param event one of the TusEvents keys
 * @param listener the listener
 * @returns the Server instance, for chaining
 */
on<Event extends keyof TusEvents>(event: Event, listener: TusEvents[Event]): this;
/**
 * Registers a generic event listener.
 * @param eventName the event name
 * @param listener the listener
 * @returns the Server instance, for chaining
 */
on(eventName: Parameters<on>[0], listener: Parameters<on>[1]): this;
/**
 * Emits a typed TUS event.
 * @param event one of the TusEvents keys
 * @param listener the event payload
 * @returns the emit return value
 */
emit<Event extends keyof TusEvents>(event: Event, listener: TusEvents[Event]): ReturnType<emit>;
/**
 * Emits a generic event.
 * @param eventName the event name
 * @param listener the event payload
 * @returns the emit return value
 */
emit(eventName: Parameters<emit>[0], listener: Parameters<emit>[1]): ReturnType<emit>;
}
/**
 * Debug logger for the server
 */
const log = debug('tus-node-server');
// biome-ignore lint/suspicious/noUnsafeDeclarationMerging: it's fine
export class Server extends EventEmitter {
datastore: DataStore;
handlers: Handlers;
options: ServerOptions;
/**
 * Creates a Server.
 * @param options - server options, including the datastore
 * @throws if options, options.path, or options.datastore is missing
 */
constructor(
options: WithOptional<ServerOptions, 'locker'> & {
datastore: DataStore;
},
) {
super();
if (!options) {
throw new Error("'options' must be defined");
}
if (!options.path) {
throw new Error("'path' is not defined; must have a path");
}
if (!options.datastore) {
throw new Error("'datastore' is not defined; must have a datastore");
}
if (!options.locker) {
options.locker = new MemoryLocker();
}
if (!options.lockDrainTimeout) {
options.lockDrainTimeout = 3000;
}
if (!options.postReceiveInterval) {
options.postReceiveInterval = 1000;
}
const { datastore, ...rest } = options;
this.options = rest as ServerOptions;
this.datastore = datastore;
this.handlers = {
// The GET handler also serves custom registered routes
GET: new GetHandler(this.datastore, this.options),
// These methods are handled per the tus protocol
HEAD: new HeadHandler(this.datastore, this.options),
OPTIONS: new OptionsHandler(this.datastore, this.options),
PATCH: new PatchHandler(this.datastore, this.options),
POST: new PostHandler(this.datastore, this.options),
DELETE: new DeleteHandler(this.datastore, this.options),
};
// Any handler assigned to this object with a method for a key is used to respond to those requests.
// They are set/reset when a datastore is assigned to the server.
// Before removing an event listener from the server, it must first be removed from each handler.
// This must happen before adding the 'newListener' listener, to avoid adding a
// 'removeListener' event listener to all request handlers.
this.on('removeListener', (event: string, listener) => {
this.datastore.removeListener(event, listener);
for (const method of REQUEST_METHODS) {
this.handlers[method].removeListener(event, listener);
}
});
// As event listeners are added to the server, make sure they bubble up
// from the request handlers to the server level.
this.on('newListener', (event: string, listener) => {
this.datastore.on(event, listener);
for (const method of REQUEST_METHODS) {
this.handlers[method].on(event, listener);
}
});
}
/**
 * Registers a custom GET route.
 * @param path - the request path
 * @param handler - the route handler
 */
get(path: string, handler: RouteHandler) {
this.handlers.GET.registerPath(path, handler);
}
/**
 * The main 'request' handler.
 * @param req - HTTP request object
 * @param res - HTTP response object
 * @returns the HTTP response
 */
async handle(
req: http.IncomingMessage,
res: http.ServerResponse,
// biome-ignore lint/suspicious/noConfusingVoidType: it's fine
): Promise<http.ServerResponse | stream.Writable | void> {
const context = this.createContext(req);
log(`[TusServer] handle: ${req.method} ${req.url}`);
// Allow overriding the HTTP method. Some libraries/environments do not support
// PATCH and DELETE requests, e.g. Flash in a browser and parts of Java
if (req.headers['x-http-method-override']) {
req.method = (req.headers['x-http-method-override'] as string).toUpperCase();
}
const onError = async (error: { status_code?: number; body?: string; message: string }) => {
let status_code = error.status_code || ERRORS.UNKNOWN_ERROR.status_code;
let body = error.body || `${ERRORS.UNKNOWN_ERROR.body}${error.message || ''}\n`;
if (this.options.onResponseError) {
const errorMapping = await this.options.onResponseError(req, res, error as Error);
if (errorMapping) {
status_code = errorMapping.status_code;
body = errorMapping.body;
}
}
return this.write(context, req, res, status_code, body);
};
if (req.method === 'GET') {
const handler = this.handlers.GET;
return handler.send(req, res).catch(onError);
}
// The Tus-Resumable header must be included in every request and response except
// OPTIONS requests. Its value must be the protocol version used by the client or server.
res.setHeader('Tus-Resumable', TUS_RESUMABLE);
if (req.method !== 'OPTIONS' && req.headers['tus-resumable'] === undefined) {
return this.write(context, req, res, 412, 'Tus-Resumable Required\n');
}
// Validate all required headers for tus protocol compliance
const invalid_headers: string[] = [];
for (const header_name in req.headers) {
if (req.method === 'OPTIONS') {
continue;
}
// Content-Type is only checked for PATCH requests. For all other request methods
// it is ignored and treated as if no content type were set, because some HTTP
// clients may enforce a default value for this header.
// See https://github.com/tus/tus-node-server/pull/116
if (header_name.toLowerCase() === 'content-type' && req.method !== 'PATCH') {
continue;
}
if (!validateHeader(header_name, req.headers[header_name] as string | undefined)) {
log(`Invalid ${header_name} header: ${req.headers[header_name]}`);
invalid_headers.push(header_name);
}
}
if (invalid_headers.length > 0) {
return this.write(context, req, res, 400, `Invalid ${invalid_headers.join(' ')}\n`);
}
// Enable CORS
res.setHeader('Access-Control-Allow-Origin', this.getCorsOrigin(req));
res.setHeader('Access-Control-Expose-Headers', EXPOSED_HEADERS);
if (this.options.allowedCredentials === true) {
res.setHeader('Access-Control-Allow-Credentials', 'true');
}
// Dispatch to the handler for the request method
const handler = this.handlers[req.method as keyof Handlers];
if (handler) {
return handler.send(req, res, context).catch(onError);
}
return this.write(context, req, res, 404, 'Not found\n');
}
/**
 * Resolves the CORS origin for a request.
 *
 * If the request's `origin` header is in the allowed-origins list, it is
 * echoed back. Otherwise the first configured origin is returned, and when
 * no list is configured at all, the wildcard `*` is used.
 *
 * @param req HTTP request object
 * @returns the CORS origin: the request `origin`, a configured origin, or `*`
 *
 * Notes:
 * - keeps the CORS policy flexible per request
 * - `*` effectively disables origin restrictions
 */
private getCorsOrigin(req: http.IncomingMessage): string {
const origin = req.headers.origin;
// Check whether the request's `origin` is in the allowed-origins list
const isOriginAllowed = this.options.allowedOrigins?.some((allowedOrigin) => allowedOrigin === origin) ?? true;
// Echo the `origin` back when it is present and allowed
if (origin && isOriginAllowed) {
return origin;
}
// Otherwise fall back to the first configured origin, if any
if (this.options.allowedOrigins && this.options.allowedOrigins.length > 0) {
return this.options.allowedOrigins[0]!;
}
// With no configured origins, return the wildcard `*` to allow every origin
return '*';
}
/**
 * Writes an HTTP response.
 * @param context - cancellation context
 * @param req - HTTP request object
 * @param res - HTTP response object
 * @param status - HTTP status code
 * @param body - response body
 * @param headers - response headers
 * @returns the ended HTTP response
 */
write(
context: CancellationContext,
req: http.IncomingMessage,
res: http.ServerResponse,
status: number,
body = '',
headers: Record<string, string | number> = {},
) {
const isAborted = context.signal.aborted;
if (status !== 204) {
(headers as any)['Content-Length'] = Buffer.byteLength(body, 'utf8');
}
if (isAborted) {
// The request was marked as aborted. Tell the client the connection will be
// closed by setting the 'Connection' header to 'close'. This keeps the server
// from continuing to process a request nobody needs anymore, saving resources.
(headers as any).Connection = 'close';
// Listen for the response's 'finish' event, which fires once the response has
// been sent to the client. At that point the request ('req') object is destroyed,
// a crucial step for releasing any resources still tied to the aborted request.
res.on('finish', () => {
req.destroy();
});
}
res.writeHead(status, headers);
res.write(body);
return res.end();
}
/**
 * Creates and starts an HTTP server using this handler.
 * @param args - arguments forwarded to http.Server#listen
 * @returns the HTTP server
 */
// biome-ignore lint/suspicious/noExplicitAny: todo
listen(...args: any[]): http.Server {
return http.createServer(this.handle.bind(this)).listen(...args);
}
/**
 * Deletes expired uploads from the datastore.
 * @returns the number of deleted uploads
 * @throws if the datastore does not support the expiration extension
 */
cleanUpExpiredUploads(): Promise<number> {
if (!this.datastore.hasExtension('expiration')) {
throw ERRORS.UNSUPPORTED_EXPIRATION_EXTENSION;
}
return this.datastore.deleteExpired();
}
/**
 * Creates the cancellation context for a request.
 * @param req - HTTP request object
 * @returns an object with the abort signal and the abort/cancel methods
 */
protected createContext(req: http.IncomingMessage) {
// 初始化两个 AbortController
// 1. `requestAbortController` 用于即时请求终止,特别适用于在发生错误时停止客户端上传。
// 2. `abortWithDelayController` 用于在终止前引入延迟,允许服务器有时间完成正在进行的操作。
// 这在未来的请求可能需要获取当前请求持有的锁时特别有用。
const requestAbortController = new AbortController();
const abortWithDelayController = new AbortController();
// Invoked when `abortWithDelayController` fires, aborting the request after the configured delay.
const onDelayedAbort = (err: unknown) => {
abortWithDelayController.signal.removeEventListener('abort', onDelayedAbort);
setTimeout(() => {
requestAbortController.abort(err);
}, this.options.lockDrainTimeout);
};
abortWithDelayController.signal.addEventListener('abort', onDelayedAbort);
// Remove the listener when the request closes to avoid memory leaks.
req.on('close', () => {
abortWithDelayController.signal.removeEventListener('abort', onDelayedAbort);
});
// Return an object exposing the signal and two ways to abort the request.
// `signal` is used to observe request abortion.
// `abort` aborts the request immediately.
// `cancel` starts the delayed abort sequence.
return {
signal: requestAbortController.signal,
abort: () => {
// Abort the request immediately
if (!requestAbortController.signal.aborted) {
requestAbortController.abort(ERRORS.ABORTED);
}
},
cancel: () => {
// Start the delayed abort sequence unless it is already in progress.
if (!abortWithDelayController.signal.aborted) {
abortWithDelayController.abort(ERRORS.ABORTED);
}
},
};
}
}
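
A minimal usage sketch of the request handler above. The `Server` class name, option names, and import paths are assumptions based on upstream tus-node-server and are not confirmed by this diff.

import { Server } from './server'; // path assumed
import { FileStore } from './store/file-store'; // path assumed

const server = new Server({
  path: '/files', // route at which uploads are accepted
  datastore: new FileStore({ directory: './uploads' }),
  allowedOrigins: ['https://app.example.com'], // echoed back by getCorsOrigin()
});

// listen() wraps http.createServer(this.handle.bind(this)).listen(...)
server.listen(1080);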

View File

@ -1,230 +0,0 @@
// TODO: use /promises versions
import fs from 'node:fs'
import fsProm from 'node:fs/promises'
import path from 'node:path'
import stream from 'node:stream'
import type http from 'node:http'
import debug from 'debug'
import { DataStore, Upload, ERRORS } from '../../utils'
import {
FileKvStore as FileConfigstore,
MemoryKvStore as MemoryConfigstore,
RedisKvStore as RedisConfigstore,
KvStore as Configstore,
} from '../../utils'
type Options = {
directory: string
configstore?: Configstore
expirationPeriodInMilliseconds?: number
}
const MASK = '0777'
const IGNORED_MKDIR_ERROR = 'EEXIST'
const FILE_DOESNT_EXIST = 'ENOENT'
const log = debug('tus-node-server:stores:filestore')
export class FileStore extends DataStore {
directory: string
configstore: Configstore
expirationPeriodInMilliseconds: number
constructor({ directory, configstore, expirationPeriodInMilliseconds }: Options) {
super()
this.directory = directory
this.configstore = configstore ?? new FileConfigstore(directory)
this.expirationPeriodInMilliseconds = expirationPeriodInMilliseconds ?? 0
this.extensions = [
'creation',
'creation-with-upload',
'creation-defer-length',
'termination',
'expiration',
]
// TODO: this async call can not happen in the constructor
this.checkOrCreateDirectory()
}
/**
* Ensure the directory exists.
*/
private checkOrCreateDirectory() {
fs.mkdir(this.directory, { mode: MASK, recursive: true }, (error) => {
if (error && error.code !== IGNORED_MKDIR_ERROR) {
throw error
}
})
}
/**
* Create an empty file.
*/
async create(file: Upload): Promise<Upload> {
const dirs = file.id.split('/').slice(0, -1)
const filePath = path.join(this.directory, file.id)
await fsProm.mkdir(path.join(this.directory, ...dirs), { recursive: true })
await fsProm.writeFile(filePath, '')
await this.configstore.set(file.id, file)
file.storage = { type: 'file', path: filePath }
return file
}
read(file_id: string) {
return fs.createReadStream(path.join(this.directory, file_id))
}
remove(file_id: string): Promise<void> {
return new Promise((resolve, reject) => {
fs.unlink(`${this.directory}/${file_id}`, (err) => {
if (err) {
log('[FileStore] delete: Error', err)
reject(ERRORS.FILE_NOT_FOUND)
return
}
try {
resolve(this.configstore.delete(file_id))
} catch (error) {
reject(error)
}
})
})
}
write(
readable: http.IncomingMessage | stream.Readable,
file_id: string,
offset: number
): Promise<number> {
const file_path = path.join(this.directory, file_id)
const writeable = fs.createWriteStream(file_path, {
flags: 'r+',
start: offset,
})
let bytes_received = 0
const transform = new stream.Transform({
transform(chunk, _, callback) {
bytes_received += chunk.length
callback(null, chunk)
},
})
return new Promise((resolve, reject) => {
stream.pipeline(readable, transform, writeable, (err) => {
if (err) {
log('[FileStore] write: Error', err)
return reject(ERRORS.FILE_WRITE_ERROR)
}
log(`[FileStore] write: ${bytes_received} bytes written to ${file_path}`)
offset += bytes_received
log(`[FileStore] write: File is now ${offset} bytes`)
return resolve(offset)
})
})
}
async getUpload(id: string): Promise<Upload> {
const file = await this.configstore.get(id)
if (!file) {
throw ERRORS.FILE_NOT_FOUND
}
return new Promise((resolve, reject) => {
const file_path = `${this.directory}/${id}`
fs.stat(file_path, (error, stats) => {
if (error && error.code === FILE_DOESNT_EXIST && file) {
log(
`[FileStore] getUpload: No file found at ${file_path} but db record exists`,
file
)
return reject(ERRORS.FILE_NO_LONGER_EXISTS)
}
if (error && error.code === FILE_DOESNT_EXIST) {
log(`[FileStore] getUpload: No file found at ${file_path}`)
return reject(ERRORS.FILE_NOT_FOUND)
}
if (error) {
return reject(error)
}
if (stats.isDirectory()) {
log(`[FileStore] getUpload: ${file_path} is a directory`)
return reject(ERRORS.FILE_NOT_FOUND)
}
return resolve(
new Upload({
id,
size: file.size,
offset: stats.size,
metadata: file.metadata,
creation_date: file.creation_date,
storage: { type: 'file', path: file_path },
})
)
})
})
}
async declareUploadLength(id: string, upload_length: number) {
const file = await this.configstore.get(id)
if (!file) {
throw ERRORS.FILE_NOT_FOUND
}
file.size = upload_length
await this.configstore.set(id, file)
}
async deleteExpired(): Promise<number> {
const now = new Date()
const toDelete: Promise<void>[] = []
if (!this.configstore.list) {
throw ERRORS.UNSUPPORTED_EXPIRATION_EXTENSION
}
const uploadKeys = await this.configstore.list()
for (const file_id of uploadKeys) {
try {
const info = await this.configstore.get(file_id)
if (
info &&
'creation_date' in info &&
this.getExpiration() > 0 &&
info.size !== info.offset &&
info.creation_date
) {
const creation = new Date(info.creation_date)
const expires = new Date(creation.getTime() + this.getExpiration())
if (now > expires) {
toDelete.push(this.remove(file_id))
}
}
} catch (error) {
if (error !== ERRORS.FILE_NO_LONGER_EXISTS) {
throw error
}
}
}
await Promise.all(toDelete)
return toDelete.length
}
getExpiration(): number {
return this.expirationPeriodInMilliseconds
}
}
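
A short sketch driving the FileStore above directly, outside the HTTP layer; the upload ID and payload are illustrative, and top-level await assumes an ESM context.

import { Readable } from 'node:stream';
import { FileStore } from './file-store';
import { Upload } from '../../utils';

const store = new FileStore({ directory: './uploads' });
const upload = await store.create(new Upload({ id: 'abc123', size: 11, offset: 0 }));

// write() appends at the given offset and resolves with the new offset
const offset = await store.write(Readable.from([Buffer.from('hello world')]), upload.id, 0);
console.log(offset); // 11

const state = await store.getUpload(upload.id);
console.log(state.offset === state.size); // true once fully written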

View File

@ -1,2 +0,0 @@
export * from "./file-store"
export * from "./s3-store"

View File

@ -1,875 +0,0 @@
import os from 'node:os';
import fs, { promises as fsProm } from 'node:fs';
import stream, { promises as streamProm } from 'node:stream';
import type { Readable } from 'node:stream';
import type AWS from '@aws-sdk/client-s3';
import { NoSuchKey, NotFound, S3, type S3ClientConfig } from '@aws-sdk/client-s3';
import debug from 'debug';
import {
DataStore,
StreamSplitter,
Upload,
ERRORS,
TUS_RESUMABLE,
type KvStore,
MemoryKvStore,
type ChunkInfo,
} from '../../utils';
import { Semaphore, type Permit } from '@shopify/semaphore';
import MultiStream from 'multistream';
import crypto from 'node:crypto';
import path from 'node:path';
const log = debug('tus-node-server:stores:s3store');
type Options = {
// The preferred size for parts sent to S3. Cannot be lower than 5MiB or larger than 5GiB.
// The server calculates the optimal part size, which takes this size into account,
// but may increase it so the upload does not exceed the S3 limit of 10,000 parts.
partSize?: number;
useTags?: boolean;
maxConcurrentPartUploads?: number;
cache?: KvStore<MetadataValue>;
expirationPeriodInMilliseconds?: number;
// Options to pass to the AWS S3 SDK.
s3ClientConfig: S3ClientConfig & { bucket: string };
};
export type MetadataValue = {
file: Upload;
'upload-id': string;
'tus-version': string;
};
function calcOffsetFromParts(parts?: Array<AWS.Part>) {
// @ts-expect-error not undefined
return parts && parts.length > 0 ? parts.reduce((a, b) => a + b.Size, 0) : 0;
}
// Implementation (based on https://github.com/tus/tusd/blob/master/s3store/s3store.go)
//
// Once a new tus upload is initiated, multiple objects in S3 are created:
//
// First of all, a new info object is stored which contains (as Metadata) a JSON-encoded
// blob of general information about the upload including its size and meta data.
// Objects of this kind have the suffix ".info" in their key.
//
// In addition a new multipart upload
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) is
// created. Whenever a new chunk is uploaded to tus-node-server using a PATCH request, a
// new part is pushed to the multipart upload on S3.
//
// If meta data is associated with the upload during creation, it will be added
// to the multipart upload and after finishing it, the meta data will be passed
// to the final object. However, the metadata which will be attached to the
// final object can only contain ASCII characters and every non-ASCII character
// will be replaced by a question mark (for example, "Menü" will be "Men?").
// However, this does not apply for the metadata returned by the `_getMetadata`
// function since it relies on the info object for reading the metadata.
// Therefore, HEAD responses will always contain the unchanged metadata, Base64-
// encoded, even if it contains non-ASCII characters.
//
// Once the upload is finished, the multipart upload is completed, resulting in
// the entire file being stored in the bucket. The info object, containing
// meta data is not deleted.
//
// Considerations
//
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
// are internally used.
// For each incoming PATCH request (a call to `write`), a new part is uploaded
// to S3.
export class S3Store extends DataStore {
private bucket: string;
private cache: KvStore<MetadataValue>;
private client: S3;
private preferredPartSize: number;
private expirationPeriodInMilliseconds = 0;
private useTags = true;
private partUploadSemaphore: Semaphore;
public maxMultipartParts = 10_000 as const;
public minPartSize = 5_242_880 as const; // 5MiB
public maxUploadSize = 5_497_558_138_880 as const; // 5TiB
constructor(options: Options) {
super();
const { partSize, s3ClientConfig } = options;
const { bucket, ...restS3ClientConfig } = s3ClientConfig;
this.extensions = ['creation', 'creation-with-upload', 'creation-defer-length', 'termination', 'expiration'];
this.bucket = bucket;
this.preferredPartSize = partSize || 8 * 1024 * 1024;
this.expirationPeriodInMilliseconds = options.expirationPeriodInMilliseconds ?? 0;
this.useTags = options.useTags ?? true;
this.cache = options.cache ?? new MemoryKvStore<MetadataValue>();
this.client = new S3(restS3ClientConfig);
this.partUploadSemaphore = new Semaphore(options.maxConcurrentPartUploads ?? 60);
}
protected shouldUseExpirationTags() {
return this.expirationPeriodInMilliseconds !== 0 && this.useTags;
}
protected useCompleteTag(value: 'true' | 'false') {
if (!this.shouldUseExpirationTags()) {
return undefined;
}
return `Tus-Completed=${value}`;
}
/**
* Saves upload metadata to a `${file_id}.info` file on S3.
* Please note that the file is empty and the metadata is saved
* on the S3 object's `Metadata` field, so that only a `headObject`
* is necessary to retrieve the data.
*/
private async saveMetadata(upload: Upload, uploadId: string) {
log(`[${upload.id}] saving metadata`);
console.log(`[S3Store] Saving metadata for upload ${upload.id}, uploadId: ${uploadId}`);
try {
await this.client.putObject({
Bucket: this.bucket,
Key: this.infoKey(upload.id),
Body: JSON.stringify(upload),
Tagging: this.useCompleteTag('false'),
Metadata: {
'upload-id': uploadId,
'tus-version': TUS_RESUMABLE,
},
});
log(`[${upload.id}] metadata file saved`);
console.log(`[S3Store] Metadata saved successfully for upload ${upload.id}`);
} catch (error) {
console.error(`[S3Store] Failed to save metadata for upload ${upload.id}:`, error);
throw error;
}
}
private async completeMetadata(upload: Upload) {
if (!this.shouldUseExpirationTags()) {
return;
}
const { 'upload-id': uploadId } = await this.getMetadata(upload.id);
await this.client.putObject({
Bucket: this.bucket,
Key: this.infoKey(upload.id),
Body: JSON.stringify(upload),
Tagging: this.useCompleteTag('true'),
Metadata: {
'upload-id': uploadId,
'tus-version': TUS_RESUMABLE,
},
});
}
/**
* Retrieves upload metadata previously saved in `${file_id}.info`.
* There's a small and simple caching mechanism to avoid multiple
* HTTP calls to S3.
*/
private async getMetadata(id: string): Promise<MetadataValue> {
const cached = await this.cache.get(id);
if (cached) {
return cached;
}
const { Metadata, Body } = await this.client.getObject({
Bucket: this.bucket,
Key: this.infoKey(id),
});
const file = JSON.parse((await Body?.transformToString()) as string);
const metadata: MetadataValue = {
'tus-version': Metadata?.['tus-version'] as string,
'upload-id': Metadata?.['upload-id'] as string,
file: new Upload({
id,
size: file.size ? Number.parseInt(file.size, 10) : undefined,
offset: Number.parseInt(file.offset, 10),
metadata: file.metadata,
creation_date: file.creation_date,
storage: file.storage,
}),
};
await this.cache.set(id, metadata);
return metadata;
}
private infoKey(id: string) {
return `${id}.info`;
}
private partKey(id: string, isIncomplete = false) {
if (isIncomplete) {
id += '.part';
}
// TODO: introduce ObjectPrefixing for parts and incomplete parts.
// ObjectPrefix is prepended to the name of each S3 object that is created
// to store uploaded files. It can be used to create a pseudo-directory
// structure in the bucket, e.g. "path/to/my/uploads".
return id;
}
private async uploadPart(
metadata: MetadataValue,
readStream: fs.ReadStream | Readable,
partNumber: number,
): Promise<string> {
console.log(`[S3Store] Starting upload part #${partNumber} for ${metadata.file.id}`);
try {
const data = await this.client.uploadPart({
Bucket: this.bucket,
Key: metadata.file.id,
UploadId: metadata['upload-id'],
PartNumber: partNumber,
Body: readStream,
});
log(`[${metadata.file.id}] finished uploading part #${partNumber}`);
console.log(`[S3Store] Successfully uploaded part #${partNumber} for ${metadata.file.id}, ETag: ${data.ETag}`);
return data.ETag as string;
} catch (error) {
console.error(`[S3Store] Failed to upload part #${partNumber} for ${metadata.file.id}:`, error);
throw error;
}
}
private async uploadIncompletePart(id: string, readStream: fs.ReadStream | Readable): Promise<string> {
console.log(`[S3Store] Starting upload incomplete part for ${id}`);
try {
const data = await this.client.putObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
Body: readStream,
Tagging: this.useCompleteTag('false'),
});
log(`[${id}] finished uploading incomplete part`);
console.log(`[S3Store] Successfully uploaded incomplete part for ${id}, ETag: ${data.ETag}`);
return data.ETag as string;
} catch (error) {
console.error(`[S3Store] Failed to upload incomplete part for ${id}:`, error);
throw error;
}
}
private async downloadIncompletePart(id: string) {
const incompletePart = await this.getIncompletePart(id);
if (!incompletePart) {
return;
}
const filePath = await this.uniqueTmpFileName('tus-s3-incomplete-part-');
try {
let incompletePartSize = 0;
const byteCounterTransform = new stream.Transform({
transform(chunk, _, callback) {
incompletePartSize += chunk.length;
callback(null, chunk);
},
});
// write to temporary file
await streamProm.pipeline(incompletePart, byteCounterTransform, fs.createWriteStream(filePath));
const createReadStream = (options: { cleanUpOnEnd: boolean }) => {
const fileReader = fs.createReadStream(filePath);
if (options.cleanUpOnEnd) {
fileReader.on('end', () => {
fs.unlink(filePath, () => {
// ignore
});
});
fileReader.on('error', (err) => {
fileReader.destroy(err);
fs.unlink(filePath, () => {
// ignore
});
});
}
return fileReader;
};
return {
size: incompletePartSize,
path: filePath,
createReader: createReadStream,
};
} catch (err) {
fsProm.rm(filePath).catch(() => {
/* ignore */
});
throw err;
}
}
private async getIncompletePart(id: string): Promise<Readable | undefined> {
try {
const data = await this.client.getObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
});
return data.Body as Readable;
} catch (error) {
if (error instanceof NoSuchKey) {
return undefined;
}
throw error;
}
}
private async getIncompletePartSize(id: string): Promise<number | undefined> {
try {
const data = await this.client.headObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
});
return data.ContentLength;
} catch (error) {
if (error instanceof NotFound) {
return undefined;
}
throw error;
}
}
private async deleteIncompletePart(id: string): Promise<void> {
await this.client.deleteObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
});
}
/**
* Uploads a stream to s3 using multiple parts
*/
private async uploadParts(
metadata: MetadataValue,
readStream: stream.Readable,
currentPartNumber: number,
offset: number,
): Promise<number> {
console.log(
`[S3Store] uploadParts starting for ${metadata.file.id}, currentPartNumber: ${currentPartNumber}, offset: ${offset}`,
);
const size = metadata.file.size;
const promises: Promise<void>[] = [];
let pendingChunkFilepath: string | null = null;
let bytesUploaded = 0;
let permit: Permit | undefined = undefined;
const optimalPartSize = this.calcOptimalPartSize(size);
console.log(`[S3Store] Using optimal part size: ${optimalPartSize} bytes for ${metadata.file.id}`);
const splitterStream = new StreamSplitter({
chunkSize: optimalPartSize,
directory: os.tmpdir(),
})
.on('beforeChunkStarted', async () => {
console.log(`[S3Store] Acquiring semaphore permit for ${metadata.file.id}`);
permit = await this.partUploadSemaphore.acquire();
})
.on('chunkStarted', (filepath) => {
console.log(`[S3Store] Chunk started for ${metadata.file.id}, file: ${filepath}`);
pendingChunkFilepath = filepath;
})
.on('chunkFinished', (chunkInfo: ChunkInfo) => {
const { size: partSize, path } = chunkInfo;
console.log(`[S3Store] Chunk finished for ${metadata.file.id}, size: ${partSize}, path: ${path}`);
pendingChunkFilepath = null;
const acquiredPermit = permit;
const partNumber = currentPartNumber++;
offset += partSize;
const isFinalPart = size === offset;
console.log(
`[S3Store] Processing part #${partNumber} for ${metadata.file.id}, isFinalPart: ${isFinalPart}, partSize: ${partSize}`,
);
// biome-ignore lint/suspicious/noAsyncPromiseExecutor: it's fine
const deferred = new Promise<void>(async (resolve, reject) => {
try {
// Only the first chunk of each PATCH request can prepend
// an incomplete part (last chunk) from the previous request.
if (!path) {
reject(new Error(`Chunk path is null or undefined for ${metadata.file.id}, part #${partNumber}`));
return;
}
const readable = fs.createReadStream(path);
readable.on('error', reject);
if (partSize >= this.minPartSize || isFinalPart) {
console.log(`[S3Store] Uploading part #${partNumber} for ${metadata.file.id} (${partSize} bytes)`);
await this.uploadPart(metadata, readable, partNumber);
} else {
console.log(`[S3Store] Uploading incomplete part for ${metadata.file.id} (${partSize} bytes)`);
await this.uploadIncompletePart(metadata.file.id, readable);
}
bytesUploaded += partSize;
console.log(
`[S3Store] Part upload completed for ${metadata.file.id}, total bytes uploaded: ${bytesUploaded}`,
);
resolve();
} catch (error) {
console.error(`[S3Store] Part upload failed for ${metadata.file.id}, part #${partNumber}:`, error);
reject(error);
} finally {
if (path) {
fsProm.rm(path).catch(() => {
/* ignore */
});
}
acquiredPermit?.release();
}
});
promises.push(deferred);
})
.on('chunkError', (error) => {
console.error(`[S3Store] Chunk error for ${metadata.file.id}:`, error);
permit?.release();
});
try {
console.log(`[S3Store] Starting stream pipeline for ${metadata.file.id}`);
await streamProm.pipeline(readStream, splitterStream);
console.log(`[S3Store] Stream pipeline completed for ${metadata.file.id}`);
} catch (error) {
console.error(`[S3Store] Stream pipeline failed for ${metadata.file.id}:`, error);
if (pendingChunkFilepath !== null) {
try {
await fsProm.rm(pendingChunkFilepath);
} catch {
log(`[${metadata.file.id}] failed to remove chunk ${pendingChunkFilepath}`);
}
}
promises.push(Promise.reject(error));
} finally {
console.log(`[S3Store] Waiting for all part uploads to complete for ${metadata.file.id}`);
await Promise.all(promises);
console.log(`[S3Store] All part uploads completed for ${metadata.file.id}`);
}
console.log(`[S3Store] uploadParts completed for ${metadata.file.id}, total bytes uploaded: ${bytesUploaded}`);
return bytesUploaded;
}
/**
* Completes a multipart upload on S3.
* This is where S3 concatenates all the uploaded parts.
*/
private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
const response = await this.client.completeMultipartUpload({
Bucket: this.bucket,
Key: metadata.file.id,
UploadId: metadata['upload-id'],
MultipartUpload: {
Parts: parts.map((part) => {
return {
ETag: part.ETag,
PartNumber: part.PartNumber,
};
}),
},
});
return response.Location;
}
/**
* Gets the number of complete parts/chunks already uploaded to S3.
* Retrieves only consecutive parts.
*/
private async retrieveParts(id: string, partNumberMarker?: string): Promise<Array<AWS.Part>> {
const metadata = await this.getMetadata(id);
const params: AWS.ListPartsCommandInput = {
Bucket: this.bucket,
Key: id,
UploadId: metadata['upload-id'],
PartNumberMarker: partNumberMarker,
};
const data = await this.client.listParts(params);
let parts = data.Parts ?? [];
if (data.IsTruncated) {
const rest = await this.retrieveParts(id, data.NextPartNumberMarker);
parts = [...parts, ...rest];
}
if (!partNumberMarker) {
// biome-ignore lint/style/noNonNullAssertion: it's fine
parts.sort((a, b) => a.PartNumber! - b.PartNumber!);
}
return parts;
}
/**
* Removes cached data for a given file.
*/
private async clearCache(id: string) {
log(`[${id}] removing cached data`);
await this.cache.delete(id);
}
private calcOptimalPartSize(size?: number): number {
// When the upload size is not known, assume the largest possible value (`maxUploadSize`)
if (size === undefined) {
size = this.maxUploadSize;
}
let optimalPartSize: number;
// When the upload is smaller than or equal to preferredPartSize, upload it in a single part.
if (size <= this.preferredPartSize) {
optimalPartSize = size;
}
// Check whether the upload fits into maxMultipartParts or fewer parts at preferredPartSize.
else if (size <= this.preferredPartSize * this.maxMultipartParts) {
optimalPartSize = this.preferredPartSize;
// The upload is too big for the preferred size,
// so divide the size by the maximum number of parts and round up.
} else {
optimalPartSize = Math.ceil(size / this.maxMultipartParts);
}
return optimalPartSize;
}
/**
* Creates a multipart upload on S3 attaching any metadata to it.
* Also, a `${file_id}.info` file is created which holds some information
* about the upload itself like: `upload-id`, `upload-length`, etc.
*/
public async create(upload: Upload) {
log(`[${upload.id}] initializing multipart upload`);
console.log(`[S3Store] Creating multipart upload for ${upload.id}, bucket: ${this.bucket}`);
const request: AWS.CreateMultipartUploadCommandInput = {
Bucket: this.bucket,
Key: upload.id,
Metadata: { 'tus-version': TUS_RESUMABLE },
};
if (upload.metadata?.contentType) {
request.ContentType = upload.metadata.contentType;
console.log(`[S3Store] Setting ContentType: ${upload.metadata.contentType}`);
}
if (upload.metadata?.cacheControl) {
request.CacheControl = upload.metadata.cacheControl;
}
upload.creation_date = new Date().toISOString();
try {
console.log(`[S3Store] Sending createMultipartUpload request for ${upload.id}`);
const res = await this.client.createMultipartUpload(request);
console.log(`[S3Store] Multipart upload created successfully, UploadId: ${res.UploadId}`);
upload.storage = {
type: 's3',
path: res.Key as string,
bucket: this.bucket,
};
await this.saveMetadata(upload, res.UploadId as string);
log(`[${upload.id}] multipart upload created (${res.UploadId})`);
console.log(`[S3Store] Upload creation completed for ${upload.id}`);
return upload;
} catch (error) {
console.error(`[S3Store] Failed to create multipart upload for ${upload.id}:`, error);
throw error;
}
}
async read(id: string) {
const data = await this.client.getObject({
Bucket: this.bucket,
Key: id,
});
return data.Body as Readable;
}
/**
* Write to the file, starting at the provided offset
*/
public async write(src: stream.Readable, id: string, offset: number): Promise<number> {
console.log(`[S3Store] Starting write operation for ${id}, offset: ${offset}`);
try {
// Metadata request needs to happen first
console.log(`[S3Store] Retrieving metadata for ${id}`);
const metadata = await this.getMetadata(id);
console.log(`[S3Store] Retrieved metadata for ${id}, file size: ${metadata.file.size}`);
const parts = await this.retrieveParts(id);
console.log(`[S3Store] Retrieved ${parts.length} existing parts for ${id}`);
// biome-ignore lint/style/noNonNullAssertion: it's fine
const partNumber: number = parts.length > 0 ? (parts[parts.length - 1]?.PartNumber ?? 0) : 0;
const nextPartNumber = partNumber + 1;
console.log(`[S3Store] Next part number will be: ${nextPartNumber}`);
const incompletePart = await this.downloadIncompletePart(id);
const requestedOffset = offset;
if (incompletePart) {
console.log(`[S3Store] Found incomplete part for ${id}, size: ${incompletePart.size}`);
// once the file is on disk, we delete the incomplete part
await this.deleteIncompletePart(id);
offset = requestedOffset - incompletePart.size;
src = new MultiStream([incompletePart.createReader({ cleanUpOnEnd: true }), src]);
}
console.log(`[S3Store] Starting uploadParts for ${id}`);
const bytesUploaded = await this.uploadParts(metadata, src, nextPartNumber, offset);
console.log(`[S3Store] uploadParts completed for ${id}, bytes uploaded: ${bytesUploaded}`);
// The size of the incomplete part should not be counted, because the
// process of the incomplete part should be fully transparent to the user.
const newOffset = requestedOffset + bytesUploaded - (incompletePart?.size ?? 0);
console.log(`[S3Store] New offset for ${id}: ${newOffset}, file size: ${metadata.file.size}`);
if (metadata.file.size === newOffset) {
console.log(`[S3Store] Upload completed for ${id}, finishing multipart upload`);
try {
const parts = await this.retrieveParts(id);
console.log(`[S3Store] Retrieved ${parts.length} parts for completion`);
await this.finishMultipartUpload(metadata, parts);
console.log(`[S3Store] Multipart upload finished successfully for ${id}`);
await this.completeMetadata(metadata.file);
console.log(`[S3Store] Metadata completed for ${id}`);
await this.clearCache(id);
console.log(`[S3Store] Cache cleared for ${id}`);
} catch (error) {
log(`[${id}] failed to finish upload`, error);
console.error(`[S3Store] Failed to finish upload for ${id}:`, error);
throw error;
}
}
return newOffset;
} catch (error) {
console.error(`[S3Store] Write operation failed for ${id}:`, error);
throw error;
}
}
public async getUpload(id: string): Promise<Upload> {
let metadata: MetadataValue;
try {
metadata = await this.getMetadata(id);
} catch (error) {
log('getUpload: No file found.', error);
throw ERRORS.FILE_NOT_FOUND;
}
let offset = 0;
try {
const parts = await this.retrieveParts(id);
offset = calcOffsetFromParts(parts);
} catch (error: any) {
// Check if the error is caused by the upload not being found. This happens
// when the multipart upload has already been completed or aborted. Since
// we already found the info object, we know that the upload has been
// completed and can therefore ensure that the offset equals the size.
// AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
// Spaces, can also return NoSuchKey.
if (error.Code === 'NoSuchUpload' || error.Code === 'NoSuchKey') {
return new Upload({
...metadata.file,
offset: metadata.file.size as number,
size: metadata.file.size,
metadata: metadata.file.metadata,
storage: metadata.file.storage,
});
}
log(error);
throw error;
}
const incompletePartSize = await this.getIncompletePartSize(id);
return new Upload({
...metadata.file,
offset: offset + (incompletePartSize ?? 0),
size: metadata.file.size,
storage: metadata.file.storage,
});
}
public async declareUploadLength(file_id: string, upload_length: number) {
const { file, 'upload-id': uploadId } = await this.getMetadata(file_id);
if (!file) {
throw ERRORS.FILE_NOT_FOUND;
}
file.size = upload_length;
await this.saveMetadata(file, uploadId);
}
public async remove(id: string): Promise<void> {
try {
const { 'upload-id': uploadId } = await this.getMetadata(id);
if (uploadId) {
await this.client.abortMultipartUpload({
Bucket: this.bucket,
Key: id,
UploadId: uploadId,
});
}
} catch (error: any) {
if (error?.Code && ['NotFound', 'NoSuchKey', 'NoSuchUpload'].includes(error.Code)) {
log('remove: No file found.', error);
throw ERRORS.FILE_NOT_FOUND;
}
throw error;
}
await this.client.deleteObjects({
Bucket: this.bucket,
Delete: {
Objects: [{ Key: id }, { Key: this.infoKey(id) }],
},
});
this.clearCache(id);
}
protected getExpirationDate(created_at: string) {
const date = new Date(created_at);
return new Date(date.getTime() + this.getExpiration());
}
getExpiration(): number {
return this.expirationPeriodInMilliseconds;
}
async deleteExpired(): Promise<number> {
if (this.getExpiration() === 0) {
return 0;
}
let keyMarker: string | undefined = undefined;
let uploadIdMarker: string | undefined = undefined;
let isTruncated = true;
let deleted = 0;
while (isTruncated) {
const listResponse: AWS.ListMultipartUploadsCommandOutput = await this.client.listMultipartUploads({
Bucket: this.bucket,
KeyMarker: keyMarker,
UploadIdMarker: uploadIdMarker,
});
const expiredUploads =
listResponse.Uploads?.filter((multiPartUpload) => {
const initiatedDate = multiPartUpload.Initiated;
return initiatedDate && new Date().getTime() > this.getExpirationDate(initiatedDate.toISOString()).getTime();
}) || [];
const objectsToDelete = expiredUploads.reduce(
(all, expiredUpload) => {
all.push(
{
key: this.infoKey(expiredUpload.Key as string),
},
{
key: this.partKey(expiredUpload.Key as string, true),
},
);
return all;
},
[] as { key: string }[],
);
const deletions: Promise<AWS.DeleteObjectsCommandOutput>[] = [];
// Batch delete 1000 items at a time
while (objectsToDelete.length > 0) {
const objects = objectsToDelete.splice(0, 1000);
deletions.push(
this.client.deleteObjects({
Bucket: this.bucket,
Delete: {
Objects: objects.map((object) => ({
Key: object.key,
})),
},
}),
);
}
const [objectsDeleted] = await Promise.all([
Promise.all(deletions),
...expiredUploads.map((expiredUpload) => {
return this.client.abortMultipartUpload({
Bucket: this.bucket,
Key: expiredUpload.Key,
UploadId: expiredUpload.UploadId,
});
}),
]);
deleted += objectsDeleted.reduce((all, acc) => all + (acc.Deleted?.length ?? 0), 0);
isTruncated = Boolean(listResponse.IsTruncated);
if (isTruncated) {
keyMarker = listResponse.NextKeyMarker;
uploadIdMarker = listResponse.NextUploadIdMarker;
}
}
return deleted;
}
private async uniqueTmpFileName(template: string): Promise<string> {
let tries = 0;
const maxTries = 10;
while (tries < maxTries) {
const fileName = template + crypto.randomBytes(10).toString('base64url').slice(0, 10);
const filePath = path.join(os.tmpdir(), fileName);
try {
await fsProm.lstat(filePath);
// If no error, file exists, so try again
tries++;
} catch (e: any) {
if (e.code === 'ENOENT') {
// File does not exist, return the path
return filePath;
}
throw e; // For other errors, rethrow
}
}
throw new Error(`Could not find a unique file name after ${maxTries} tries`);
}
}
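
A configuration sketch for the S3Store above; the bucket, region, and credentials are placeholders. With the default 8 MiB preferred part size, calcOptimalPartSize() keeps 8 MiB parts for anything up to 8 MiB × 10,000 ≈ 78 GiB, and an unknown size is treated as maxUploadSize (5 TiB), forcing parts of roughly 524 MiB.

import { S3Store } from './s3-store';

const store = new S3Store({
  partSize: 8 * 1024 * 1024, // preferred part size; raised automatically to stay under 10,000 parts
  expirationPeriodInMilliseconds: 24 * 60 * 60 * 1000, // tag uploads so they can expire after one day
  maxConcurrentPartUploads: 60, // matches the semaphore default above
  s3ClientConfig: {
    bucket: 'my-upload-bucket',
    region: 'us-east-1',
    credentials: {
      accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
    },
  },
});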

View File

@ -1,211 +0,0 @@
/**
 * @file Type definitions for the tus protocol server
 * @description The various types and interfaces required by the tus upload server
 * @version 1.0.0
 */
import type http from 'node:http'
import { Locker, Upload } from './utils'
/**
 * tus server configuration options
 * @interface ServerOptions
 * @description All options required by the tus server
 */
export type ServerOptions = {
/**
 * The route path at which uploads are accepted.
 * @example '/files'
 */
path: string
/**
 * Maximum allowed upload size, either a fixed number of bytes or computed per request.
 * @param req HTTP request object
 * @param uploadId ID of the upload, if known
 * @returns the maximum size in bytes
 */
maxSize?:
| number
| ((req: http.IncomingMessage, uploadId: string | null) => Promise<number> | number)
/**
 * Return a relative URL in the Location response header.
 * @description When true, a relative URL is returned instead of an absolute one.
 */
relativeLocation?: boolean
/**
 * Trust reverse-proxy headers.
 * @description Uses the Forwarded, X-Forwarded-Proto and X-Forwarded-Host
 * headers when building the Location header.
 */
respectForwardedHeaders?: boolean
/**
 * Custom request headers allowed for CORS.
 * @description Included in the Access-Control-Allow-Headers response header.
 */
allowedHeaders?: string[]
/**
 * Allow credentials.
 * @description Controls the Access-Control-Allow-Credentials response header.
 */
allowedCredentials?: boolean
/**
 * List of origins allowed for CORS.
 * @description Used for the Access-Control-Allow-Origin response header.
 */
allowedOrigins?: string[]
/**
 * Progress interval (in milliseconds).
 * @description Interval at which EVENTS.POST_RECEIVE_V2 upload-progress events are emitted.
 */
postReceiveInterval?: number
/**
 * Custom upload URL generation logic.
 * @param req HTTP request object
 * @param options URL generation options
 * @returns the generated upload URL
 */
generateUrl?: (
req: http.IncomingMessage,
options: { proto: string; host: string; path: string; id: string }
) => string
/**
 * Custom logic for extracting the upload ID from a request.
 * @param req HTTP request object
 * @param lastPath the last segment of the request URL
 * @returns the upload ID, or undefined when none can be derived
 */
getFileIdFromRequest?: (
req: http.IncomingMessage,
lastPath?: string
) => string | undefined
/**
 * Custom upload naming function.
 * @description Defaults to crypto.randomBytes(16).toString('hex')
 * @param req HTTP request object
 * @param metadata upload metadata
 * @returns the generated upload ID
 */
namingFunction?: (
req: http.IncomingMessage,
metadata?: Record<string, string | null>
) => string | Promise<string>
/**
 * Locking mechanism for uploads.
 * @description Serializes concurrent access to the same upload.
 */
locker:
| Locker
| Promise<Locker>
| ((req: http.IncomingMessage) => Locker | Promise<Locker>)
/**
 * Lock drain timeout (in milliseconds).
 * @description How long in-flight operations may run before a held lock is forcibly released.
 */
lockDrainTimeout?: number
/**
 * Disable termination of finished uploads.
 * @description When true, uploads that have completed can no longer be deleted.
 */
disableTerminationForFinishedUploads?: boolean
/**
 * Hook invoked before an upload is created.
 * @description Throwing here rejects the creation request.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param upload the upload about to be created
 * @throws to reject the upload creation
 */
onUploadCreate?: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => Promise<
http.ServerResponse | { res: http.ServerResponse; metadata?: Upload['metadata'] }
>
/**
 * Hook invoked after an upload has finished.
 * @description Throwing here turns the response into an error response.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param upload the finished upload
 * @throws to fail the request
 */
onUploadFinish?: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => Promise<
| http.ServerResponse
| {
res: http.ServerResponse
status_code?: number
headers?: Record<string, string | number>
body?: string
}
>
/**
 * Hook invoked for every incoming request.
 * @description Useful for authentication and request validation.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param uploadId ID of the upload targeted by the request
 */
onIncomingRequest?: (
req: http.IncomingMessage,
res: http.ServerResponse,
uploadId: string
) => Promise<void>
/**
 * Hook invoked when an error response is about to be sent.
 * @description Allows customizing the error status code and body.
 * @param req HTTP request object
 * @param res HTTP response object
 * @param err the error being reported
 */
onResponseError?: (
req: http.IncomingMessage,
res: http.ServerResponse,
err: Error | { status_code: number; body: string }
) =>
| Promise<{ status_code: number; body: string } | undefined>
| { status_code: number; body: string }
| undefined
}
/**
 * Route handler.
 * @description Function type for handling HTTP requests.
 */
export type RouteHandler = (req: http.IncomingMessage, res: http.ServerResponse) => void
/**
 * Utility type: makes the given properties optional.
 * @template T the base type
 * @template K the keys to make optional
 */
export type WithOptional<T, K extends keyof T> = Omit<T, K> & { [P in K]+?: T[P] }
/**
 * Utility type: makes the given properties required.
 * @template T the base type
 * @template K the keys to make required
 */
export type WithRequired<T, K extends keyof T> = T & { [P in K]-?: T[P] }
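
A sketch of a ServerOptions value exercising the hooks declared above. The required `locker` is omitted via Partial; upstream tus-node-server ships a MemoryLocker for that slot, but it is not part of this diff, and the import path is assumed.

import type { ServerOptions } from './types'; // path assumed

const options: Partial<ServerOptions> = {
  path: '/files',
  relativeLocation: false,
  lockDrainTimeout: 3000,
  async onUploadFinish(req, res, upload) {
    console.log(`upload ${upload.id} finished at offset ${upload.offset}`);
    return { res, status_code: 204 };
  },
};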

View File

@ -1,132 +0,0 @@
/**
 * Constants for the TUS protocol,
 * an HTTP-based protocol for resumable file uploads.
 */
// HTTP request methods supported by the TUS protocol
export const REQUEST_METHODS = ['POST', 'HEAD', 'PATCH', 'OPTIONS', 'DELETE'] as const
// HTTP headers used by the TUS protocol
export const HEADERS = [
'Authorization',
'Content-Type',
'Location',
'Tus-Extension',
'Tus-Max-Size',
'Tus-Resumable',
'Tus-Version',
'Upload-Concat',
'Upload-Defer-Length',
'Upload-Length',
'Upload-Metadata',
'Upload-Offset',
'X-HTTP-Method-Override',
'X-Requested-With',
'X-Forwarded-Host',
'X-Forwarded-Proto',
'Forwarded',
] as const
// Lowercase all header names for easier processing
export const HEADERS_LOWERCASE = HEADERS.map((header) => {
return header.toLowerCase()
}) as Array<Lowercase<(typeof HEADERS)[number]>>
// Allowed headers, allowed request methods, and exposed headers
export const ALLOWED_HEADERS = HEADERS.join(', ')
export const ALLOWED_METHODS = REQUEST_METHODS.join(', ')
export const EXPOSED_HEADERS = HEADERS.join(', ')
// Error responses defined for the TUS protocol
export const ERRORS = {
MISSING_OFFSET: {
status_code: 403,
body: 'Upload-Offset header required\n',
},
ABORTED: {
status_code: 400,
body: 'Request aborted due to lock acquired',
},
INVALID_TERMINATION: {
status_code: 400,
body: 'Cannot terminate an already completed upload',
},
ERR_LOCK_TIMEOUT: {
status_code: 500,
body: 'failed to acquire lock before timeout',
},
INVALID_CONTENT_TYPE: {
status_code: 403,
body: 'Content-Type header required\n',
},
FILE_NOT_FOUND: {
status_code: 404,
body: 'The file for this url was not found\n',
},
INVALID_OFFSET: {
status_code: 409,
body: 'Upload-Offset conflict\n',
},
FILE_NO_LONGER_EXISTS: {
status_code: 410,
body: 'The file for this url no longer exists\n',
},
ERR_SIZE_EXCEEDED: {
status_code: 413,
body: "upload's size exceeded\n",
},
ERR_MAX_SIZE_EXCEEDED: {
status_code: 413,
body: 'Maximum size exceeded\n',
},
INVALID_LENGTH: {
status_code: 400,
body: 'Upload-Length or Upload-Defer-Length header required\n',
},
INVALID_METADATA: {
status_code: 400,
body: 'Upload-Metadata is invalid. It MUST consist of one or more comma-separated key-value pairs. The key and value MUST be separated by a space. The key MUST NOT contain spaces and commas and MUST NOT be empty. The key SHOULD be ASCII encoded and the value MUST be Base64 encoded. All keys MUST be unique',
},
UNKNOWN_ERROR: {
status_code: 500,
body: 'Something went wrong with that request\n',
},
FILE_WRITE_ERROR: {
status_code: 500,
body: 'Something went wrong receiving the file\n',
},
UNSUPPORTED_CONCATENATION_EXTENSION: {
status_code: 501,
body: 'Concatenation extension is not (yet) supported. Disable parallel uploads in the tus client.\n',
},
UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION: {
status_code: 501,
body: 'creation-defer-length extension is not (yet) supported.\n',
},
UNSUPPORTED_EXPIRATION_EXTENSION: {
status_code: 501,
body: 'expiration extension is not (yet) supported.\n',
},
} as const
// Event types emitted by the TUS server
export const POST_CREATE = 'POST_CREATE' as const
/** @deprecated this is almost the same as POST_FINISH, use POST_RECEIVE_V2 instead */
export const POST_RECEIVE = 'POST_RECEIVE' as const
export const POST_RECEIVE_V2 = 'POST_RECEIVE_V2' as const
export const POST_FINISH = 'POST_FINISH' as const
export const POST_TERMINATE = 'POST_TERMINATE' as const
export const EVENTS = {
POST_CREATE,
/** @deprecated this is almost the same as POST_FINISH, use POST_RECEIVE_V2 instead */
POST_RECEIVE,
POST_RECEIVE_V2,
POST_FINISH,
POST_TERMINATE,
} as const
// Max-age and version constants for the TUS protocol
export const MAX_AGE = 86_400 as const
export const TUS_RESUMABLE = '1.0.0' as const
export const TUS_VERSION = ['1.0.0'] as const

View File

@ -1,3 +0,0 @@
export * from './models'
export * from './constants'
export * from './kvstores'

View File

@ -1,92 +0,0 @@
import fs from 'node:fs/promises';
import path from 'node:path';
import type { KvStore } from './Types';
import type { Upload } from '../models';
/**
 * FileKvStore
 *
 * @description File-based key-value store
 * @remarks
 * - Stores JSON metadata on disk next to the uploaded files
 * - Intended for use together with the FileStore
 *
 * @typeparam T the stored value type, defaults to Upload
 */
export class FileKvStore<T = Upload> implements KvStore<T> {
/** Storage directory path */
directory: string;
/**
 * Creates a file-based key-value store.
 *
 * @param path directory used for storage
 */
constructor(path: string) {
this.directory = path;
}
/**
 * Reads the value for a key.
 *
 * @param key the key to look up
 * @returns the stored value, or undefined when not found
 */
async get(key: string): Promise<T | undefined> {
try {
// Read the JSON file for this key
const buffer = await fs.readFile(this.resolve(key), 'utf8');
// Parse the JSON and return it
return JSON.parse(buffer as string);
} catch {
// Return undefined when the file is missing or unreadable
return undefined;
}
}
/**
 * Stores a value under a key.
 * @param key the key to write
 * @param value the value to store
 */
async set(key: string, value: T): Promise<void> {
// Serialize the value to JSON and write it to disk
await fs.writeFile(this.resolve(key), JSON.stringify(value));
}
/**
 * Deletes the value for a key.
 *
 * @param key the key to delete
 */
async delete(key: string): Promise<void> {
// Remove the JSON file for this key
await fs.rm(this.resolve(key));
}
/**
 * Lists all stored keys.
 *
 * @returns the list of keys
 */
async list(): Promise<Array<string>> {
// Read all files in the storage directory
const files = await fs.readdir(this.directory);
// Sort the file names
const sorted = files.sort((a, b) => a.localeCompare(b));
// Extract the base name (without the .json extension)
const name = (file: string) => path.basename(file, '.json');
// Keep only valid tus file IDs:
// retain names that appear in pairs (same base name, one with a .json extension)
return sorted.filter((file, idx) => idx < sorted.length - 1 && name(file) === name(sorted[idx + 1]!));
}
/**
 * Resolves a key to its full JSON file path.
 *
 * @param key the key to resolve
 * @returns the absolute file path
 * @private
 */
private resolve(key: string): string {
// Map the key to a JSON file inside the storage directory
return path.resolve(this.directory, `${key}.json`);
}
}
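
A short usage sketch of the FileKvStore above: each value is persisted as `${key}.json` inside the configured directory; the paths and IDs are illustrative.

import { FileKvStore } from './FileKvStore';
import { Upload } from '../models';

const kv = new FileKvStore('./uploads');
await kv.set('abc123', new Upload({ id: 'abc123', offset: 0, size: 1024 }));

const entry = await kv.get('abc123'); // parsed back from ./uploads/abc123.json
console.log(entry?.offset); // 0

await kv.delete('abc123'); // removes ./uploads/abc123.json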

View File

@ -1,54 +0,0 @@
import type {Redis as IoRedis} from 'ioredis'
import type {KvStore} from './Types'
import type {Upload} from '../models'
export class IoRedisKvStore<T = Upload> implements KvStore<T> {
constructor(
private redis: IoRedis,
private prefix = ''
) {
this.redis = redis
this.prefix = prefix
}
private prefixed(key: string): string {
return `${this.prefix}${key}`
}
async get(key: string): Promise<T | undefined> {
return this.deserializeValue(await this.redis.get(this.prefixed(key)))
}
async set(key: string, value: T): Promise<void> {
await this.redis.set(this.prefixed(key), this.serializeValue(value))
}
async delete(key: string): Promise<void> {
await this.redis.del(this.prefixed(key))
}
async list(): Promise<Array<string>> {
const keys = new Set<string>()
let cursor = '0'
do {
const [next, batch] = await this.redis.scan(
cursor,
'MATCH',
this.prefixed('*'),
'COUNT',
'20'
)
cursor = next
for (const key of batch) keys.add(key)
} while (cursor !== '0')
return Array.from(keys)
}
private serializeValue(value: T): string {
return JSON.stringify(value)
}
private deserializeValue(buffer: string | null): T | undefined {
return buffer ? JSON.parse(buffer) : undefined
}
}

View File

@ -1,26 +0,0 @@
import type {Upload} from '../models'
import type {KvStore} from './Types'
/**
* Memory based configstore.
* Used mostly for unit tests.
*/
export class MemoryKvStore<T = Upload> implements KvStore<T> {
data: Map<string, T> = new Map()
async get(key: string): Promise<T | undefined> {
return this.data.get(key)
}
async set(key: string, value: T): Promise<void> {
this.data.set(key, value)
}
async delete(key: string): Promise<void> {
this.data.delete(key)
}
async list(): Promise<Array<string>> {
return [...this.data.keys()]
}
}

View File

@ -1,94 +0,0 @@
import type { RedisClientType } from '@redis/client'
import type { KvStore } from './Types'
import type { Upload } from '../models'
/**
 * Redis-based key-value store.
 * A KvStore implementation backed by Redis.
 *
 * Usage notes:
 * - values are serialized to JSON strings
 * - an optional key prefix namespaces all entries
 *
 * @author Mitja Puzigaća <mitjap@gmail.com>
 */
export class RedisKvStore<T = Upload> implements KvStore<T> {
/**
 * Creates a RedisKvStore.
 *
 * @param redis a connected Redis client
 * @param prefix prefix applied to all keys
 */
constructor(
private redis: RedisClientType,
private prefix = ''
) {
this.redis = redis
this.prefix = prefix
}
/**
 * Reads the value for a key.
 *
 * @param key the key to look up
 * @returns the stored value, or undefined when not found
 */
async get(key: string): Promise<T | undefined> {
return this.deserializeValue(await this.redis.get(this.prefix + key))
}
/**
 * Stores a value under a key.
 *
 * @param key the key to write
 * @param value the value to store
 */
async set(key: string, value: T): Promise<void> {
await this.redis.set(this.prefix + key, this.serializeValue(value))
}
/**
 * Deletes the value for a key.
 *
 * @param key the key to delete
 */
async delete(key: string): Promise<void> {
await this.redis.del(this.prefix + key)
}
/**
 * Lists all stored keys.
 *
 * @returns the list of keys
 */
async list(): Promise<Array<string>> {
const keys = new Set<string>()
let cursor = 0
do {
const result = await this.redis.scan(cursor, { MATCH: `${this.prefix}*`, COUNT: 20 })
cursor = result.cursor
for (const key of result.keys) keys.add(key)
} while (cursor !== 0)
return Array.from(keys)
}
/**
 * Serializes a value to a JSON string.
 *
 * @param value the value to serialize
 * @returns the JSON string
 */
private serializeValue(value: T): string {
return JSON.stringify(value)
}
/**
 * Deserializes a JSON string back into a value.
 *
 * @param buffer the raw string from Redis, or null
 * @returns the parsed value, or undefined
 */
private deserializeValue(buffer: string | null): T | undefined {
return buffer ? JSON.parse(buffer) : undefined
}
}
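
A wiring sketch for the RedisKvStore above using @redis/client; the URL and prefix are placeholders. Note that list() returns the prefixed keys, since SCAN matches on `${prefix}*`.

import { createClient, type RedisClientType } from '@redis/client';
import { RedisKvStore } from './RedisKvStore';

const redis = createClient({ url: 'redis://localhost:6379' }) as RedisClientType;
await redis.connect();

const kv = new RedisKvStore(redis, 'tus:');
await kv.set('abc123', { id: 'abc123', offset: 0 } as any); // value shape illustrative
console.log(await kv.list()); // ['tus:abc123']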

View File

@ -1,43 +0,0 @@
/**
 * Key-value store abstraction.
 * @description Common interface implemented by the configstores.
 * @module KvStore
 * @remarks Implementations persist upload metadata keyed by upload ID.
 */
import type { Upload } from '../models'
/**
 * Key-value store interface.
 * @template T the stored value type, defaults to Upload
 * @description Minimal get/set/delete/list contract.
 * @interface
 */
export interface KvStore<T = Upload> {
/**
 * Reads the value for a key.
 * @param key the key to look up
 * @returns the stored value, or undefined when not found
 */
get(key: string): Promise<T | undefined>
/**
 * Stores a value under a key.
 * @param key the key to write
 * @param value the value to store
 * @returns a promise resolving when the write completes
 */
set(key: string, value: T): Promise<void>
/**
 * Deletes the value for a key.
 * @param key the key to delete
 * @returns a promise resolving when the deletion completes
 */
delete(key: string): Promise<void>
/**
 * Lists all stored keys (optional).
 * @returns the list of keys
 */
list?(): Promise<Array<string>>
}

View File

@ -1,5 +0,0 @@
export { FileKvStore } from './FileKvStore'
export { MemoryKvStore } from './MemoryKvStore'
export { RedisKvStore } from './RedisKvStore'
export { IoRedisKvStore } from './IoRedisKvStore'
export type { KvStore } from './Types'

View File

@ -1,14 +0,0 @@
/**
 * CancellationContext interface
 *
 * Represents the cancellation state of a request.
 *
 * Use cases:
 * - aborting a request immediately on error or client disconnect
 * - cancelling gracefully, allowing in-flight operations to drain
 * - observing cancellation via the abort signal
 */
export interface CancellationContext {
signal: AbortSignal
abort: () => void
cancel: () => void
}

View File

@ -1,72 +0,0 @@
import EventEmitter from 'node:events'
import {Upload} from './Upload'
import type stream from 'node:stream'
import type http from 'node:http'
export class DataStore extends EventEmitter {
extensions: string[] = []
hasExtension(extension: string) {
return this.extensions?.includes(extension)
}
/**
* Called in POST requests. This method just creates a
* file, implementing the creation extension.
*
* http://tus.io/protocols/resumable-upload.html#creation
*/
async create(file: Upload) {
return file
}
/**
* Called in DELETE requests. This method just deletes the file from the store.
* http://tus.io/protocols/resumable-upload.html#termination
*/
async remove(id: string) {}
/**
* Called in PATCH requests. This method should write data
* to the DataStore file, and possibly implement the
* concatenation extension.
*
* http://tus.io/protocols/resumable-upload.html#concatenation
*/
async write(
stream: http.IncomingMessage | stream.Readable,
id: string,
offset: number
) {
return 0
}
/**
* Called in HEAD requests. This method should return the bytes
* written to the DataStore, for the client to know where to resume
* the upload.
*/
async getUpload(id: string): Promise<Upload> {
return new Upload({
id,
size: 0,
offset: 0,
storage: {type: 'datastore', path: ''},
})
}
/**
* Called in PATCH requests when upload length is known after being defered.
*/
async declareUploadLength(id: string, upload_length: number) {}
/**
* Returns number of expired uploads that were deleted.
*/
async deleteExpired(): Promise<number> {
return 0
}
getExpiration(): number {
return 0
}
}
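
A minimal sketch of extending the DataStore base class above: only the methods a deployment needs are overridden, everything else keeps the no-op defaults. The Map-backed storage and import path are illustrative.

import { DataStore, Upload, ERRORS } from '../utils' // path assumed

class InMemoryStore extends DataStore {
  private uploads = new Map<string, Upload>()
  async create(file: Upload) {
    this.uploads.set(file.id, file)
    return file
  }
  async getUpload(id: string): Promise<Upload> {
    const found = this.uploads.get(id)
    if (!found) throw ERRORS.FILE_NOT_FOUND
    return found
  }
}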

View File

@ -1,12 +0,0 @@
export type RequestRelease = () => Promise<void> | void
export interface Locker {
newLock(id: string): Lock
}
export interface Lock {
lock(cancelReq: RequestRelease): Promise<void>
unlock(): Promise<void>
}
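
A toy Locker sketch satisfying the interfaces above: a FIFO promise-chain mutex per upload ID. The cancelReq callback is ignored here for brevity, so this only illustrates the contract, not a drop-in replacement for upstream's MemoryLocker.

import type { Lock, Locker, RequestRelease } from './Locker'

class SimpleLocker implements Locker {
  private queues = new Map<string, Promise<void>>()
  newLock(id: string): Lock {
    let release!: () => void
    const queues = this.queues
    return {
      async lock(_cancelReq: RequestRelease) {
        const prev = queues.get(id) ?? Promise.resolve()
        queues.set(id, new Promise<void>((resolve) => { release = resolve }))
        await prev // wait until the previous holder calls unlock()
      },
      async unlock() {
        release() // let the next waiter proceed
      },
    }
  }
}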

View File

@ -1,104 +0,0 @@
import type { Upload } from './Upload';
// Code points for the ASCII space and comma characters
const ASCII_SPACE = ' '.codePointAt(0);
const ASCII_COMMA = ','.codePointAt(0);
// Regular expression used to validate Base64 strings
const BASE64_REGEX = /^[\d+/A-Za-z]*={0,2}$/;
/**
 * Validates a metadata key.
 * @param key the key to validate
 * @returns true if the key is valid, false otherwise
 */
export function validateKey(key: string) {
// An empty key is invalid
if (key.length === 0) {
return false;
}
// Check every character's code point against the allowed range
for (let i = 0; i < key.length; ++i) {
const charCodePoint = key.codePointAt(i) as number;
if (
charCodePoint > 127 || // non-ASCII character
charCodePoint === ASCII_SPACE || // space character
charCodePoint === ASCII_COMMA // comma character
) {
return false;
}
}
return true;
}
/**
 * Validates a metadata value.
 * @param value the value to validate
 * @returns true if the value is a valid Base64 string, false otherwise
 */
export function validateValue(value: string) {
// The length of a Base64 string must be a multiple of 4
if (value.length % 4 !== 0) {
return false;
}
// Validate the Base64 format with the regular expression
return BASE64_REGEX.test(value);
}
/**
 * Parses an Upload-Metadata header string into an object.
 * @param str the metadata string
 * @returns the parsed metadata object
 * @throws if the metadata string is invalid
 */
export function parse(str?: string) {
const meta: Record<string, string | null> = {};
// An empty or whitespace-only string is invalid
if (!str || str.trim().length === 0) {
throw new Error('Metadata string is not valid');
}
// Iterate over every key-value pair in the string
for (const pair of str.split(',')) {
const tokens = pair.split(' ');
const [key, value] = tokens;
// Validate the key and value, and ensure the key is not duplicated
if (
key &&
((tokens.length === 1 && validateKey(key)) ||
(tokens.length === 2 && validateKey(key) && value && validateValue(value))) &&
!(key in meta)
) {
// Decode the value from Base64 to a UTF-8 string, if present
const decodedValue = value ? Buffer.from(value, 'base64').toString('utf8') : null;
meta[key] = decodedValue;
} else {
throw new Error('Metadata string is not valid');
}
}
return meta;
}
/**
 * Serializes a metadata object into an Upload-Metadata header string.
 * @param metadata the metadata object
 * @returns the serialized metadata string
 */
export function stringify(metadata: NonNullable<Upload['metadata']>): string {
return Object.entries(metadata)
.map(([key, value]) => {
// If the value is null, emit only the key
if (value === null) {
return key;
}
// Base64-encode the value and join it with the key
const encodedValue = Buffer.from(value, 'utf8').toString('base64');
return `${key} ${encodedValue}`;
})
.join(',');
}
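
A round-trip sketch of the Upload-Metadata helpers above, following the tus spec: ASCII keys, Base64-encoded values, and a bare key standing for a null value.

import * as Metadata from './Metadata';

const header = 'filename ZXhhbXBsZS50eHQ=,is_confidential';
const meta = Metadata.parse(header);
console.log(meta); // { filename: 'example.txt', is_confidential: null }

console.log(Metadata.stringify(meta));
// 'filename ZXhhbXBsZS50eHQ=,is_confidential'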

View File

@ -1,54 +0,0 @@
import { Transform, type TransformCallback } from 'node:stream'
import { ERRORS } from '../constants'
// TODO: create HttpError and use it everywhere instead of throwing objects
/**
 * MaxFileExceededError
 * Error thrown when a stream exceeds the allowed maximum size.
 */
export class MaxFileExceededError extends Error {
status_code: number
body: string
constructor() {
super(ERRORS.ERR_MAX_SIZE_EXCEEDED.body)
this.status_code = ERRORS.ERR_MAX_SIZE_EXCEEDED.status_code
this.body = ERRORS.ERR_MAX_SIZE_EXCEEDED.body
Object.setPrototypeOf(this, MaxFileExceededError.prototype)
}
}
/**
 * StreamLimiter
 * A Transform stream that enforces a maximum number of bytes.
 */
export class StreamLimiter extends Transform {
private maxSize: number // maximum allowed stream size
private currentSize = 0 // bytes seen so far
/**
 * Creates a StreamLimiter.
 * @param maxSize the maximum number of bytes allowed through the stream
 */
constructor(maxSize: number) {
super()
this.maxSize = maxSize
}
/**
 * _transform implementation required by Transform.
 * Tracks the running byte count and fails with
 * MaxFileExceededError once the limit is exceeded.
 * @param chunk the incoming data chunk
 * @param encoding the chunk encoding (unused)
 * @param callback the completion callback
 */
_transform(chunk: Buffer, encoding: BufferEncoding, callback: TransformCallback): void {
this.currentSize += chunk.length // update the running total
if (this.currentSize > this.maxSize) {
callback(new MaxFileExceededError()) // abort when the limit is exceeded
} else {
callback(null, chunk) // otherwise pass the chunk through
}
}
}
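
A pipeline sketch for the StreamLimiter above: bytes pass through untouched until the running total exceeds maxSize, at which point the pipeline fails with MaxFileExceededError (413). The output path is illustrative.

import fs from 'node:fs'
import { Readable } from 'node:stream'
import { pipeline } from 'node:stream/promises'
import { StreamLimiter } from './StreamLimiter'

try {
  await pipeline(
    Readable.from([Buffer.alloc(2048)]), // 2 KiB payload
    new StreamLimiter(1024), // only 1 KiB allowed
    fs.createWriteStream('/tmp/out'),
  )
} catch (err: any) {
  console.log(err.status_code, err.body) // 413 'Maximum size exceeded\n'
}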

View File

@ -1,190 +0,0 @@
/* global BufferEncoding */
import crypto from 'node:crypto';
import fs from 'node:fs/promises';
import path from 'node:path';
import stream from 'node:stream';
/**
 * Generates a random string of the given length.
 * @param size the desired length
 * @returns a random base64url string
 */
function randomString(size: number) {
return crypto.randomBytes(size).toString('base64url').slice(0, size);
}
/**
 * Information about a finished chunk.
 */
export type ChunkInfo = {
path: string | null; // path of the chunk file
size: number; // chunk size in bytes
};
/**
 * Options for StreamSplitter.
 */
type Options = {
chunkSize: number; // size of each chunk
directory: string; // directory in which chunks are stored
};
/**
 * Write-completion callback.
 */
type Callback = (error: Error | null) => void;
/**
 * StreamSplitter: a Writable stream that splits incoming data into fixed-size chunk files.
 */
export class StreamSplitter extends stream.Writable {
directory: Options['directory']; // directory in which chunks are stored
currentChunkPath: string | null; // path of the current chunk
currentChunkSize: number; // size of the current chunk
fileHandle: fs.FileHandle | null; // file handle of the current chunk
filenameTemplate: string; // file name template
chunkSize: Options['chunkSize']; // size of each chunk
part: number; // number of the current chunk
/**
 * Creates a StreamSplitter.
 * @param chunkSize size of each chunk
 * @param directory directory in which chunks are stored
 * @param options Writable stream options
 */
constructor({ chunkSize, directory }: Options, options?: stream.WritableOptions) {
super(options);
this.chunkSize = chunkSize;
this.currentChunkPath = null;
this.currentChunkSize = 0;
this.fileHandle = null;
this.directory = directory;
this.filenameTemplate = randomString(10);
this.part = 0;
this.on('error', this._handleError.bind(this));
}
/**
 * Writes a chunk of data, splitting it across chunk files as needed.
 * @param chunk the incoming data
 * @param _ the encoding (unused)
 * @param callback the completion callback
 */
async _write(chunk: Buffer, _: BufferEncoding, callback: Callback) {
try {
// Start a new chunk file if none is open
if (this.fileHandle === null) {
await this._newChunk();
}
let overflow = this.currentChunkSize + chunk.length - this.chunkSize;
// If the write would push the current chunk past the limit, split it
while (overflow > 0) {
// Write only as much as fits into the current chunk
await this._writeChunk(chunk.subarray(0, chunk.length - overflow));
await this._finishChunk();
// The remainder goes into a new chunk
await this._newChunk();
chunk = chunk.subarray(chunk.length - overflow, chunk.length);
overflow = this.currentChunkSize + chunk.length - this.chunkSize;
}
// Data that fits into the current chunk is written directly
await this._writeChunk(chunk);
callback(null);
} catch (error: any) {
callback(error);
}
}
/**
 * Finalizes the stream, closing the last open chunk.
 * @param callback the completion callback
 */
async _final(callback: Callback) {
if (this.fileHandle === null) {
callback(null);
return;
}
try {
await this._finishChunk();
callback(null);
} catch (error: any) {
callback(error);
}
}
/**
 * Appends data to the current chunk file.
 * @param chunk the data to append
 */
async _writeChunk(chunk: Buffer): Promise<void> {
await fs.appendFile(this.fileHandle as fs.FileHandle, chunk);
this.currentChunkSize += chunk.length;
}
/**
 * Handles stream errors by closing the current chunk.
 */
async _handleError() {
await this.emitEvent('chunkError', this.currentChunkPath);
// On error, stop writing to avoid data loss
if (this.fileHandle === null) {
return;
}
await this.fileHandle.close();
this.currentChunkPath = null;
this.fileHandle = null;
}
/**
 * Closes the current chunk and emits chunkFinished.
 */
async _finishChunk(): Promise<void> {
if (this.fileHandle === null) {
return;
}
await this.fileHandle.close();
await this.emitEvent('chunkFinished', {
path: this.currentChunkPath,
size: this.currentChunkSize,
});
this.currentChunkPath = null;
this.fileHandle = null;
this.currentChunkSize = 0;
this.part += 1;
}
/**
 * Emits an event and awaits all of its listeners.
 * @param name the event name
 * @param payload the event payload
 */
async emitEvent<T>(name: string, payload: T) {
const listeners = this.listeners(name);
for (const listener of listeners) {
await listener(payload);
}
}
/**
 * Opens a new chunk file.
 */
async _newChunk(): Promise<void> {
const currentChunkPath = path.join(this.directory, `${this.filenameTemplate}-${this.part}`);
await this.emitEvent('beforeChunkStarted', currentChunkPath);
this.currentChunkPath = currentChunkPath;
const fileHandle = await fs.open(this.currentChunkPath, 'w');
await this.emitEvent('chunkStarted', this.currentChunkPath);
this.currentChunkSize = 0;
this.fileHandle = fileHandle;
}
}
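
A short sketch of the StreamSplitter above: a single 25-byte write with an 8-byte chunkSize produces three full 8-byte chunk files plus a 1-byte remainder, each reported via chunkFinished.

import os from 'node:os';
import { Readable } from 'node:stream';
import { pipeline } from 'node:stream/promises';
import { StreamSplitter, type ChunkInfo } from './StreamSplitter';

const splitter = new StreamSplitter({ chunkSize: 8, directory: os.tmpdir() })
  .on('chunkFinished', ({ size, path }: ChunkInfo) => {
    console.log(`chunk of ${size} bytes at ${path}`);
  });

await pipeline(Readable.from([Buffer.alloc(25)]), splitter);
// -> sizes 8, 8, 8, 1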

View File

@ -1,21 +0,0 @@
import crypto from 'node:crypto'
/**
 * Uid
 *
 * Utility for generating unique IDs,
 * used to assign identifiers to uploads.
 */
export const Uid = {
/**
 * Generates a random ID.
 *
 * Uses Node.js crypto to produce 16 random bytes
 * encoded as a hex string.
 *
 * @returns {string} a 32-character hex string
 */
rand(): string {
return crypto.randomBytes(16).toString('hex')
},
}

View File

@ -1,72 +0,0 @@
/**
 * Module: Upload
 * Description: data model for an upload
 * Usage: tracks the metadata and state of a file while it is being uploaded
 */
/**
 * Type: TUpload
 * Overview: the shape of an upload record, including its ID and progress
 */
type TUpload = {
id: string // unique identifier of the upload
size?: number // total size in bytes, optional
offset: number // number of bytes uploaded so far
metadata?: Record<string, string | null> // upload metadata, optional
storage?: { // storage information, optional
type: string // storage type
path: string // storage path
bucket?: string // storage bucket, optional
}
creation_date?: string // creation date, optional
}
/**
 * Class: Upload
 * Overview: wraps the upload data model and exposes derived state
 * Design: constructor-initialized object with a getter for derived properties
 * Usage:
 * const upload = new Upload({ id: '123', size: 1024, offset: 0 });
 * console.log(upload.sizeIsDeferred); // check whether the size is deferred
 */
export class Upload {
id: TUpload['id'] // upload ID
metadata: TUpload['metadata'] // upload metadata
size: TUpload['size'] // upload size
offset: TUpload['offset'] // upload offset
creation_date: TUpload['creation_date'] // creation date
storage: TUpload['storage'] // storage information
/**
 * Constructor.
 * Initializes the Upload from the given record; the ID is required.
 * Parameters:
 * - upload: a TUpload record
 * Errors: throws if no ID is provided
 */
constructor(upload: TUpload) {
// An ID is mandatory; throw if it is missing
if (!upload.id) {
throw new Error('[File] constructor must be given an ID')
}
// Initialize the fields
this.id = upload.id
this.size = upload.size
this.offset = upload.offset
this.metadata = upload.metadata
this.storage = upload.storage
// Default the creation date to now when not provided
this.creation_date = upload.creation_date ?? new Date().toISOString()
}
/**
 * Method: sizeIsDeferred
 * Description: checks whether the upload size is still unknown
 * Returns: true if the size is undefined, false otherwise
 */
get sizeIsDeferred(): boolean {
return this.size === undefined
}
}

View File

@ -1,8 +0,0 @@
export { DataStore } from './DataStore';
export * as Metadata from './Metadata';
export { StreamSplitter, type ChunkInfo } from './StreamSplitter';
export { StreamLimiter } from './StreamLimiter';
export { Uid } from './Uid';
export { Upload } from './Upload';
export type { Locker, Lock, RequestRelease } from './Locker';
export type { CancellationContext } from './Context';

View File

@ -1,138 +0,0 @@
/**
 * TUS protocol header validators
 *
 * Validation logic for the HTTP headers defined by the TUS protocol,
 * an open protocol for resumable file uploads.
 *
 * @version 1.0.0
 * @see https://tus.io/protocols/resumable-upload.html
 */
import { Metadata, TUS_VERSION, TUS_RESUMABLE } from "../utils"
/** Validator function type: receives an optional string value and returns whether it is valid */
type validator = (value?: string) => boolean
/**
 * Map of TUS protocol header validators:
 * one validation rule per HTTP header defined by the TUS protocol.
 */
export const validators = new Map<string, validator>([
[
'upload-offset',
/**
 * Upload-Offset header validation.
 * Must be a non-negative integer indicating the number
 * of bytes already uploaded.
 */
(value) => {
const n = Number(value)
return Number.isInteger(n) && String(n) === value && n >= 0
},
],
[
'upload-length',
/**
 * Upload-Length header validation.
 * Must be a non-negative integer indicating the total
 * size of the upload in bytes.
 */
(value) => {
const n = Number(value)
return Number.isInteger(n) && String(n) === value && n >= 0
},
],
[
'upload-defer-length',
/**
 * Upload-Defer-Length header validation.
 * Indicates that the upload size is not yet known;
 * the only allowed value is 1.
 */
(value) => value === '1',
],
[
'upload-metadata',
/**
 * Upload-Metadata header validation.
 * Must consist of one or more comma-separated key-value pairs.
 * Keys must be ASCII and must not contain spaces or commas;
 * values must be Base64 encoded.
 * Validated by attempting to parse the header.
 */
(value) => {
try {
Metadata.parse(value)
return true
} catch {
return false
}
},
],
[
'x-forwarded-proto',
/**
 * X-Forwarded-Proto header validation.
 * Only http or https are accepted.
 */
(value) => {
if (value === 'http' || value === 'https') {
return true
}
return false
},
],
[
'tus-version',
/**
 * Tus-Version header validation.
 * The value must be one of the protocol versions supported by the server.
 */
(value) => {
return TUS_VERSION.includes(value as any)
},
],
[
'tus-resumable',
/**
 * Tus-Resumable header validation.
 * Every request and response except OPTIONS must include it,
 * and it must match the protocol version in use.
 */
(value) => value === TUS_RESUMABLE,
],
['content-type', (value) => value === 'application/offset+octet-stream'],
[
'upload-concat',
/**
 * Upload-Concat header validation.
 * Used by the concatenation extension: either the literal
 * value "partial", or a value starting with "final;"
 * followed by a list of upload URLs.
 */
(value) => {
if (!value) return false
const valid_partial = value === 'partial'
const valid_final = value.startsWith('final;')
return valid_partial || valid_final
},
],
])
/**
 * Checks whether an HTTP header value conforms to the TUS protocol.
 * @param name the header name
 * @param value the header value
 * @returns true when valid, or when no validator exists for the header
 */
export function validateHeader(name: string, value?: string): boolean {
const lowercaseName = name.toLowerCase()
if (!validators.has(lowercaseName)) {
return true
}
return validators.get(lowercaseName)!(value)
}
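
A brief usage sketch of validateHeader; the expected results follow directly from the validators above, and the header values are illustrative:

// A minimal sketch of validateHeader against the rules above.
validateHeader('Upload-Offset', '1024')                           // true: non-negative integer
validateHeader('Upload-Offset', '-1')                             // false
validateHeader('Upload-Defer-Length', '1')                        // true: '1' is the only accepted value
validateHeader('Content-Type', 'application/offset+octet-stream') // true
validateHeader('Upload-Concat', 'final;/files/a /files/b')        // true: final plus a URL list
validateHeader('X-Custom-Header', 'anything')                     // true: headers without a validator pass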

View File

@@ -1,40 +0,0 @@
{
"compilerOptions": {
"target": "es2022",
"module": "esnext",
"lib": [
"DOM",
"es2022"
],
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"moduleResolution": "node",
"removeComments": true,
"skipLibCheck": true,
"strict": true,
"isolatedModules": true,
"esModuleInterop": true,
"noUnusedLocals": false,
"noUnusedParameters": false,
"noImplicitReturns": false,
"noFallthroughCasesInSwitch": false,
"noUncheckedIndexedAccess": false,
"noImplicitOverride": false,
"noPropertyAccessFromIndexSignature": false,
"emitDeclarationOnly": true,
"outDir": "dist",
"incremental": true,
"tsBuildInfoFile": "./dist/tsconfig.tsbuildinfo"
},
"include": [
"src"
],
"exclude": [
"node_modules",
"dist",
"**/*.test.ts",
"**/*.spec.ts",
"**/__tests__"
]
}

View File

@@ -1,20 +0,0 @@
import { defineConfig } from 'tsup';
export default defineConfig({
entry: ['src/index.ts'],
format: ['esm', 'cjs'],
dts: true,
clean: true,
outDir: 'dist',
treeshake: true,
sourcemap: true,
external: [
'@aws-sdk/client-s3',
'@shopify/semaphore',
'debug',
'lodash.throttle',
'multistream',
'ioredis',
'@redis/client',
],
});

View File

@@ -95,7 +95,7 @@ importers:
     devDependencies:
       '@types/bun':
         specifier: latest
-        version: 1.2.14
+        version: 1.2.15
       '@types/node':
         specifier: ^22.15.21
         version: 22.15.21
@@ -453,6 +453,9 @@ importers:
       '@aws-sdk/client-s3':
         specifier: ^3.723.0
         version: 3.817.0
+      '@aws-sdk/s3-request-presigner':
+        specifier: ^3.817.0
+        version: 3.817.0
       '@hono/zod-validator':
         specifier: ^0.5.0
         version: 0.5.0(hono@4.7.10)(zod@3.25.23)
@@ -768,6 +771,10 @@ packages:
     resolution: {integrity: sha512-9x2QWfphkARZY5OGkl9dJxZlSlYM2l5inFeo2bKntGuwg4A4YUe5h7d5yJ6sZbam9h43eBrkOdumx03DAkQF9A==}
     engines: {node: '>=18.0.0'}
+  '@aws-sdk/s3-request-presigner@3.817.0':
+    resolution: {integrity: sha512-FMV0YefefGwPqIbGcHdkkHaiVWKIZoI0wOhYhYDZI129aUD5+CEOtTi7KFp1iJjAK+Cx9bW5tAYc+e9shaWEyQ==}
+    engines: {node: '>=18.0.0'}
   '@aws-sdk/signature-v4-multi-region@3.816.0':
     resolution: {integrity: sha512-idcr9NW86sSIXASSej3423Selu6fxlhhJJtMgpAqoCH/HJh1eQrONJwNKuI9huiruPE8+02pwxuePvLW46X2mw==}
     engines: {node: '>=18.0.0'}
@@ -788,6 +795,10 @@ packages:
     resolution: {integrity: sha512-N6Lic98uc4ADB7fLWlzx+1uVnq04VgVjngZvwHoujcRg9YDhIg9dUDiTzD5VZv13g1BrPYmvYP1HhsildpGV6w==}
     engines: {node: '>=18.0.0'}
+  '@aws-sdk/util-format-url@3.804.0':
+    resolution: {integrity: sha512-1nOwSg7B0bj5LFGor0udF/HSdvDuSCxP+NC0IuSOJ5RgJ2AphFo03pLtK2UwArHY5WWZaejAEz5VBND6xxOEhA==}
+    engines: {node: '>=18.0.0'}
   '@aws-sdk/util-locate-window@3.804.0':
     resolution: {integrity: sha512-zVoRfpmBVPodYlnMjgVjfGoEZagyRF5IPn3Uo6ZvOZp24chnW/FRstH7ESDHDDRga4z3V+ElUQHKpFDXWyBW5A==}
     engines: {node: '>=18.0.0'}
@@ -2543,8 +2554,8 @@ packages:
   '@types/body-parser@1.19.5':
     resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==}
-  '@types/bun@1.2.14':
-    resolution: {integrity: sha512-VsFZKs8oKHzI7zwvECiAJ5oSorWndIWEVhfbYqZd4HI/45kzW7PN2Rr5biAzvGvRuNmYLSANY+H59ubHq8xw7Q==}
+  '@types/bun@1.2.15':
+    resolution: {integrity: sha512-U1ljPdBEphF0nw1MIk0hI7kPg7dFdPyM7EenHsp6W5loNHl7zqy6JQf/RKCgnUn2KDzUpkBwHPnEJEjII594bA==}
   '@types/command-line-args@5.2.3':
     resolution: {integrity: sha512-uv0aG6R0Y8WHZLTamZwtfsDLVRnOa+n+n5rEvFWL5Na5gZ8V2Teab/duDPFzIIIhs9qizDpcavCusCLJZu62Kw==}
@@ -2938,8 +2949,8 @@ packages:
   buffer@5.7.1:
     resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
-  bun-types@1.2.14:
-    resolution: {integrity: sha512-Kuh4Ub28ucMRWeiUUWMHsT9Wcbr4H3kLIO72RZZElSDxSu7vpetRvxIUDUaW6QtaIeixIpm7OXtNnZPf82EzwA==}
+  bun-types@1.2.15:
+    resolution: {integrity: sha512-NarRIaS+iOaQU1JPfyKhZm4AsUOrwUOqRNHY0XxI8GI8jYxiLXLcdjYMG9UKS+fwWasc1uw1htV9AX24dD+p4w==}
   bundle-require@4.2.1:
     resolution: {integrity: sha512-7Q/6vkyYAwOmQNRw75x+4yRtZCZJXUDmHHlFdkiV0wgv/reNjtJwpu1jPJ0w2kbEpIM0uoKI3S4/f39dU7AjSA==}
@@ -6307,6 +6318,17 @@ snapshots:
       '@smithy/util-middleware': 4.0.3
       tslib: 2.8.1
+  '@aws-sdk/s3-request-presigner@3.817.0':
+    dependencies:
+      '@aws-sdk/signature-v4-multi-region': 3.816.0
+      '@aws-sdk/types': 3.804.0
+      '@aws-sdk/util-format-url': 3.804.0
+      '@smithy/middleware-endpoint': 4.1.7
+      '@smithy/protocol-http': 5.1.1
+      '@smithy/smithy-client': 4.3.0
+      '@smithy/types': 4.3.0
+      tslib: 2.8.1
   '@aws-sdk/signature-v4-multi-region@3.816.0':
     dependencies:
       '@aws-sdk/middleware-sdk-s3': 3.816.0
@@ -6344,6 +6366,13 @@ snapshots:
       '@smithy/util-endpoints': 3.0.5
       tslib: 2.8.1
+  '@aws-sdk/util-format-url@3.804.0':
+    dependencies:
+      '@aws-sdk/types': 3.804.0
+      '@smithy/querystring-builder': 4.0.3
+      '@smithy/types': 4.3.0
+      tslib: 2.8.1
   '@aws-sdk/util-locate-window@3.804.0':
     dependencies:
       tslib: 2.8.1
@@ -8002,9 +8031,9 @@ snapshots:
       '@types/connect': 3.4.38
       '@types/node': 20.17.50
-  '@types/bun@1.2.14':
+  '@types/bun@1.2.15':
     dependencies:
-      bun-types: 1.2.14
+      bun-types: 1.2.15
   '@types/command-line-args@5.2.3': {}
@@ -8491,7 +8520,7 @@ snapshots:
       base64-js: 1.5.1
       ieee754: 1.2.1
-  bun-types@1.2.14:
+  bun-types@1.2.15:
     dependencies:
       '@types/node': 20.17.50
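
The lockfile changes above pull in @aws-sdk/s3-request-presigner, which generates time-limited signed URLs for S3 objects. A minimal sketch of the typical call; the bucket, key, and region values here are hypothetical:

// A minimal sketch of presigning a download URL with @aws-sdk/s3-request-presigner.
// Bucket, key, and region values are hypothetical.
import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';

const client = new S3Client({ region: 'us-east-1' });
const command = new GetObjectCommand({ Bucket: 'test123', Key: 'example.txt' });

// The URL embeds a signature and expires after one hour; callers need no credentials.
const url = await getSignedUrl(client, command, { expiresIn: 3600 });
console.log(url);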

test-minio-config.js Normal file (261 lines added)
View File

@@ -0,0 +1,261 @@
#!/usr/bin/env node
/**
 * MinIO configuration test script
 * Tests against the specific configuration supplied by the user
 */
const { S3 } = require('@aws-sdk/client-s3');
const fs = require('fs');
const path = require('path');
async function testMinIOConfig() {
  console.log('🔍 Starting MinIO configuration test...\n');

  // User-supplied configuration
  const config = {
    endpoint: 'http://localhost:9000',
    region: 'us-east-1',
    credentials: {
      accessKeyId: '7Nt7OyHkwIoo3zvSKdnc',
      secretAccessKey: 'EZ0cyrjJAsabTLNSqWcU47LURMppBW2kka3LuXzb',
    },
    forcePathStyle: true,
  };

  const bucketName = 'test123';
  const uploadDir = '/opt/projects/nice/uploads';

  console.log('📋 Configuration:');
  console.log(`  Endpoint: ${config.endpoint}`);
  console.log(`  Region: ${config.region}`);
  console.log(`  Bucket: ${bucketName}`);
  console.log(`  Upload Dir: ${uploadDir}`);
  console.log(`  Access Key: ${config.credentials.accessKeyId}`);
  console.log(`  Force Path Style: ${config.forcePathStyle}`);
  console.log();
  try {
    const s3Client = new S3(config);

    // 1. Test basic connectivity and authentication
    console.log('📡 Testing connection and authentication...');
    try {
      const buckets = await s3Client.listBuckets();
      console.log('✅ Connection and authentication succeeded!');
      console.log(`📂 Existing buckets: ${buckets.Buckets?.map((b) => b.Name).join(', ') || 'none'}`);
    } catch (error) {
      console.log('❌ Connection failed:', error.message);
      if (error.message.includes('ECONNREFUSED')) {
        console.log('💡 Hint: the MinIO service may not be running; check that localhost:9000 is reachable');
      } else if (error.message.includes('Invalid')) {
        console.log('💡 Hint: verify the access key and secret key');
      }
      return false;
    }

    // 2. Check the target bucket
    console.log(`\n🪣 Checking bucket "${bucketName}"...`);
    let bucketExists = false;
    try {
      await s3Client.headBucket({ Bucket: bucketName });
      console.log(`✅ Bucket "${bucketName}" exists and is accessible`);
      bucketExists = true;
    } catch (error) {
      if (error.name === 'NotFound') {
        console.log(`❌ Bucket "${bucketName}" does not exist`);
        console.log('🔧 Attempting to create the bucket...');
        try {
          await s3Client.createBucket({ Bucket: bucketName });
          console.log(`✅ Bucket "${bucketName}" created`);
          bucketExists = true;
        } catch (createError) {
          console.log(`❌ Failed to create the bucket: ${createError.message}`);
          return false;
        }
      } else {
        console.log(`❌ Error while checking the bucket: ${error.message}`);
        return false;
      }
    }

    if (!bucketExists) {
      return false;
    }
    // 3. Check the upload directory
    console.log(`\n📁 Checking upload directory "${uploadDir}"...`);
    try {
      if (!fs.existsSync(uploadDir)) {
        console.log('📁 Upload directory does not exist, creating it...');
        fs.mkdirSync(uploadDir, { recursive: true });
        console.log('✅ Upload directory created');
      } else {
        console.log('✅ Upload directory exists');
      }
    } catch (error) {
      console.log(`❌ Failed to check/create the upload directory: ${error.message}`);
    }

    // 4. Test file upload
    console.log('\n📤 Testing file upload...');
    const testFileName = `test-upload-${Date.now()}.txt`;
    const testContent = `This is a test file
Created at: ${new Date().toISOString()}
User: nice1234
MinIO test succeeded`;

    try {
      await s3Client.putObject({
        Bucket: bucketName,
        Key: testFileName,
        Body: testContent,
        ContentType: 'text/plain',
        Metadata: {
          'test-type': 'config-validation',
          'created-by': 'test-script',
        },
      });
      console.log(`✅ File uploaded: ${testFileName}`);
    } catch (error) {
      console.log(`❌ File upload failed: ${error.message}`);
      console.log('Error details:', error);
      return false;
    }
    // 5. Verify the upload by downloading it
    console.log('\n📥 Verifying file download...');
    try {
      const result = await s3Client.getObject({
        Bucket: bucketName,
        Key: testFileName,
      });

      // Read the stream contents
      const chunks = [];
      for await (const chunk of result.Body) {
        chunks.push(chunk);
      }
      const downloadedContent = Buffer.concat(chunks).toString();

      if (downloadedContent === testContent) {
        console.log('✅ Download verified, contents match');
      } else {
        console.log('❌ File contents do not match');
        return false;
      }
    } catch (error) {
      console.log(`❌ File download failed: ${error.message}`);
      return false;
    }

    // 6. Test multipart upload support
    console.log('\n🔄 Testing multipart upload...');
    const multipartKey = `multipart-test-${Date.now()}.dat`;
    try {
      const multipartUpload = await s3Client.createMultipartUpload({
        Bucket: bucketName,
        Key: multipartKey,
        Metadata: {
          'test-type': 'multipart-upload',
        },
      });
      console.log(`✅ Multipart upload initialized: ${multipartUpload.UploadId}`);

      // Abort the upload to clean up
      await s3Client.abortMultipartUpload({
        Bucket: bucketName,
        Key: multipartKey,
        UploadId: multipartUpload.UploadId,
      });
      console.log('✅ Multipart upload test completed and cleaned up');
    } catch (error) {
      console.log(`❌ Multipart upload test failed: ${error.message}`);
      return false;
    }
    // 7. List the files in the bucket
    console.log('\n📂 Listing files in the bucket...');
    try {
      const listResult = await s3Client.listObjectsV2({
        Bucket: bucketName,
        MaxKeys: 10,
      });
      console.log(`✅ The bucket contains ${listResult.KeyCount || 0} file(s)`);
      if (listResult.Contents && listResult.Contents.length > 0) {
        console.log('Most recent files:');
        listResult.Contents.slice(-5).forEach((obj, index) => {
          const size = obj.Size < 1024 ? `${obj.Size}B` : `${Math.round(obj.Size / 1024)}KB`;
          console.log(`  ${index + 1}. ${obj.Key} (${size})`);
        });
      }
    } catch (error) {
      console.log(`❌ Failed to list files: ${error.message}`);
    }

    // 8. Clean up the test file
    console.log('\n🧹 Cleaning up test files...');
    try {
      await s3Client.deleteObject({
        Bucket: bucketName,
        Key: testFileName,
      });
      console.log('✅ Test file removed');
    } catch (error) {
      console.log(`⚠️ Failed to remove the test file: ${error.message}`);
    }

    console.log('\n🎉 All tests passed! Your MinIO configuration is correct.');
    console.log('\n📝 Summary:');
    console.log('- ✅ Connection OK');
    console.log('- ✅ Credentials valid');
    console.log('- ✅ Bucket available');
    console.log('- ✅ File upload/download working');
    console.log('- ✅ Multipart upload supported');
    console.log('\n💡 You can use these settings in your application:');
    console.log('STORAGE_TYPE=s3');
    console.log(`UPLOAD_DIR=${uploadDir}`);
    console.log(`S3_ENDPOINT=${config.endpoint}`);
    console.log(`S3_REGION=${config.region}`);
    console.log(`S3_BUCKET=${bucketName}`);
    console.log(`S3_ACCESS_KEY_ID=${config.credentials.accessKeyId}`);
    console.log('S3_SECRET_ACCESS_KEY=***');
    console.log('S3_FORCE_PATH_STYLE=true');

    return true;
  } catch (error) {
    console.log(`❌ Unexpected error during the test: ${error.message}`);
    console.log('Error stack:', error.stack);
    return false;
  }
}
// Entry point
async function main() {
  console.log('🚀 MinIO S3 storage configuration test\n');

  // Check required dependencies
  try {
    require('@aws-sdk/client-s3');
  } catch (error) {
    console.log('❌ Missing required dependency @aws-sdk/client-s3');
    console.log('Run: npm install @aws-sdk/client-s3');
    process.exit(1);
  }

  const success = await testMinIOConfig();
  if (success) {
    console.log('\n✅ Test complete: the MinIO configuration is correct and ready to use');
    process.exit(0);
  } else {
    console.log('\n❌ Test failed: review the errors above and fix the configuration');
    process.exit(1);
  }
}

main().catch((error) => {
  console.error('❌ Script execution failed:', error);
  process.exit(1);
});
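
To try the script against a local MinIO instance (assuming the service from the configuration above is listening on localhost:9000 and @aws-sdk/client-s3 is installed):

node test-minio-config.js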