commit 24bb496198 (parent eca128de5f): 01080052
@@ -32,8 +32,6 @@
    "@nice/common": "workspace:*",
    "@nice/tus": "workspace:*",
    "@trpc/server": "11.0.0-rc.456",
    "@tus/file-store": "^1.5.1",
    "@tus/s3-store": "^1.6.2",
    "argon2": "^0.41.1",
    "axios": "^1.7.2",
    "bullmq": "^5.12.0",
@@ -49,9 +47,13 @@
    "mime-types": "^2.1.35",
    "minio": "^8.0.1",
    "mitt": "^3.0.1",
    "nanoid": "^5.0.9",
    "nanoid-cjs": "^0.0.7",
    "pinyin-pro": "^3.26.0",
    "reflect-metadata": "^0.2.0",
    "rxjs": "^7.8.1",
    "sharp": "^0.33.5",
    "slugify": "^1.6.6",
    "socket.io": "^4.7.5",
    "superjson-cjs": "^2.2.3",
    "transliteration": "^2.3.5",
@@ -28,6 +28,7 @@ export class AuthController {
      host,
      authorization
    };

    const authResult = await this.authService.validateFileRequest(fileRequest);
    if (!authResult.isValid) {
      // Handle the error using the FileValidationErrorType enum
@@ -6,10 +6,13 @@ import { TrpcService } from '@server/trpc/trpc.service';
import { DepartmentService } from '@server/models/department/department.service';
import { SessionService } from './session.service';
import { RoleMapModule } from '@server/models/rbac/rbac.module';

@Module({
  imports: [StaffModule, RoleMapModule],
  providers: [AuthService, TrpcService, DepartmentService, SessionService],
  providers: [
    AuthService,
    TrpcService,
    DepartmentService,
    SessionService],
  exports: [AuthService],
  controllers: [AuthController],
})
@@ -20,30 +20,35 @@ import { SessionInfo, SessionService } from './session.service';
import { tokenConfig } from './config';
import { z } from 'zod';
import { FileAuthResult, FileRequest, FileValidationErrorType } from './types';
import { extractFilePathFromUri } from '@server/utils/file';
import { TusService } from '@server/upload/tus.service';
import { extractFileIdFromNginxUrl } from '@server/upload/utils';
@Injectable()
export class AuthService {
  private logger = new Logger(AuthService.name)
  constructor(
    private readonly staffService: StaffService,
    private readonly jwtService: JwtService,
    private readonly sessionService: SessionService,
  ) { }
    private readonly sessionService: SessionService
  ) {

  }
  async validateFileRequest(params: FileRequest): Promise<FileAuthResult> {
    try {
      // Basic parameter validation
      if (!params?.originalUri) {
        return { isValid: false, error: FileValidationErrorType.INVALID_URI };
      }
      const fileId = extractFilePathFromUri(params.originalUri);
      const fileId = extractFileIdFromNginxUrl(params.originalUri);
      console.log(params.originalUri, fileId)
      const resource = await db.resource.findFirst({ where: { fileId } });

      // Resource validation
      if (!resource) {
        return { isValid: false, error: FileValidationErrorType.RESOURCE_NOT_FOUND };
      }
      // Handle public resources
      if (resource.isPublic) {

        return {
          isValid: true,
          resourceType: resource.type || 'unknown'
@@ -58,6 +63,7 @@ export class AuthService {
      if (!payload.sub) {
        return { isValid: false, error: FileValidationErrorType.INVALID_TOKEN };
      }

      return {
        isValid: true,
        userId: payload.sub,
@@ -0,0 +1,23 @@
import path, { dirname } from "path";
import { FileMetadata, VideoMetadata, ResourceProcessor } from "../types";
import { Resource, ResourceStatus, db } from "@nice/common";
import { Logger } from "@nestjs/common";
import fs from 'fs/promises';

export abstract class BaseProcessor implements ResourceProcessor {
  constructor() { }
  protected logger = new Logger(BaseProcessor.name)

  abstract process(resource: Resource): Promise<Resource>
  protected createOutputDir(filepath: string, subdirectory: string = 'assets'): string {
    const outputDir = path.join(
      path.dirname(filepath),
      subdirectory,
    );
    fs.mkdir(outputDir, { recursive: true }).catch(err => this.logger.error(`Failed to create directory: ${err.message}`));

    return outputDir;

  }
}
//
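Note on createOutputDir above: fs.mkdir is fired and forgotten, so a subclass could in principle write into the directory before it exists. A minimal awaited variant, as a sketch only (helper name is hypothetical, not part of this commit):

```ts
import path from "path";
import fs from "fs/promises";

// Hypothetical awaited variant of BaseProcessor.createOutputDir.
async function ensureOutputDir(filepath: string, subdirectory = "assets"): Promise<string> {
  const outputDir = path.join(path.dirname(filepath), subdirectory);
  await fs.mkdir(outputDir, { recursive: true }); // recursive mkdir is a no-op if it already exists
  return outputDir;
}
```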
@ -3,33 +3,28 @@ import sharp from 'sharp';
|
|||
import { FileMetadata, ImageMetadata, ResourceProcessor } from "../types";
|
||||
import { Resource, ResourceStatus, db } from "@nice/common";
|
||||
import { getUploadFilePath } from "@server/utils/file";
|
||||
import { Logger } from "@nestjs/common";
|
||||
import { BaseProcessor } from "./BaseProcessor";
|
||||
|
||||
export class ImageProcessor implements ResourceProcessor {
|
||||
private logger = new Logger(ImageProcessor.name)
|
||||
constructor() { }
|
||||
export class ImageProcessor extends BaseProcessor {
|
||||
constructor() { super() }
|
||||
|
||||
async process(resource: Resource): Promise<Resource> {
|
||||
const { fileId } = resource;
|
||||
const filepath = getUploadFilePath(fileId);
|
||||
const { url } = resource;
|
||||
const filepath = getUploadFilePath(url);
|
||||
const originMeta = resource.metadata as unknown as FileMetadata;
|
||||
|
||||
if (!originMeta.mimeType?.startsWith('image/')) {
|
||||
this.logger.log(`Skipping non-image resource: ${resource.id}`);
|
||||
return resource;
|
||||
}
|
||||
|
||||
try {
|
||||
const image = sharp(filepath);
|
||||
const metadata = await image.metadata();
|
||||
if (!metadata) {
|
||||
throw new Error(`Failed to get metadata for image: ${fileId}`);
|
||||
throw new Error(`Failed to get metadata for image: ${url}`);
|
||||
}
|
||||
// Create WebP compressed version
|
||||
const compressedPath = path.join(
|
||||
path.dirname(filepath),
|
||||
`${path.basename(filepath, path.extname(filepath))}_compressed.webp`
|
||||
);
|
||||
const compressedDir = this.createOutputDir(filepath, "compressed")
|
||||
const compressedPath = path.join(compressedDir, `${path.basename(filepath, path.extname(filepath))}.webp`);
|
||||
await image
|
||||
.webp({
|
||||
quality: 80,
|
||||
|
@ -40,12 +35,10 @@ export class ImageProcessor implements ResourceProcessor {
|
|||
const imageMeta: ImageMetadata = {
|
||||
width: metadata.width || 0,
|
||||
height: metadata.height || 0,
|
||||
compressedUrl: path.basename(compressedPath),
|
||||
orientation: metadata.orientation,
|
||||
space: metadata.space,
|
||||
hasAlpha: metadata.hasAlpha,
|
||||
}
|
||||
console.log(imageMeta)
|
||||
const updatedResource = await db.resource.update({
|
||||
where: { id: resource.id },
|
||||
data: {
|
||||
|
|
|
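Putting the two ImageProcessor hunks together: the compressed WebP now lands in a compressed/ subdirectory next to the uploaded file, and only its basename is recorded in the metadata. Illustrative paths (the example upload path is hypothetical):

```ts
import path from "path";

const filepath = "/data/uploads/2025/01/08/Ab3xYz9QwE/photo.jpg";        // hypothetical upload path
const compressedDir = path.join(path.dirname(filepath), "compressed");    // createOutputDir(filepath, "compressed")
const compressedPath = path.join(
  compressedDir,
  `${path.basename(filepath, path.extname(filepath))}.webp`               // -> ".../compressed/photo.webp"
);
// imageMeta.compressedUrl = path.basename(compressedPath), i.e. "photo.webp"
```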
@ -1,9 +1,167 @@
|
|||
// import ffmpeg from 'fluent-ffmpeg';
|
||||
// import { ResourceProcessor } from '../types';
|
||||
// import { Resource } from '@nice/common';
|
||||
import path, { dirname } from "path";
|
||||
import ffmpeg from 'fluent-ffmpeg';
|
||||
import { FileMetadata, VideoMetadata, ResourceProcessor } from "../types";
|
||||
import { Resource, ResourceStatus, db } from "@nice/common";
|
||||
import { getUploadFilePath } from "@server/utils/file";
|
||||
import fs from 'fs/promises';
|
||||
import sharp from 'sharp';
|
||||
import { BaseProcessor } from "./BaseProcessor";
|
||||
|
||||
// export class VideoProcessor implements ResourceProcessor {
|
||||
// async process(resource: Resource): Promise<Resource> {
|
||||
export class VideoProcessor extends BaseProcessor {
|
||||
constructor() { super() }
|
||||
async process(resource: Resource): Promise<Resource> {
|
||||
const { url } = resource;
|
||||
const filepath = getUploadFilePath(url);
|
||||
this.logger.log(`Processing video for resource ID: ${resource.id}, File ID: ${url}`);
|
||||
|
||||
// }
|
||||
// }
|
||||
const originMeta = resource.metadata as unknown as FileMetadata;
|
||||
if (!originMeta.mimeType?.startsWith('video/')) {
|
||||
this.logger.log(`Skipping non-video resource: ${resource.id}`);
|
||||
return resource;
|
||||
}
|
||||
|
||||
try {
|
||||
const streamDir = this.createOutputDir(filepath, 'stream');
|
||||
const [m3u8Path, videoMetadata, coverUrl] = await Promise.all([
|
||||
this.generateM3U8Stream(filepath, streamDir),
|
||||
this.getVideoMetadata(filepath),
|
||||
this.generateVideoCover(filepath, dirname(filepath))
|
||||
]);
|
||||
|
||||
const videoMeta: VideoMetadata = {
|
||||
...videoMetadata,
|
||||
coverUrl: coverUrl,
|
||||
};
|
||||
|
||||
const updatedResource = await db.resource.update({
|
||||
where: { id: resource.id },
|
||||
data: {
|
||||
metadata: {
|
||||
...originMeta,
|
||||
...videoMeta,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
this.logger.log(`Successfully processed video for resource ID: ${resource.id}`);
|
||||
return updatedResource;
|
||||
} catch (error: any) {
|
||||
this.logger.error(`Failed to process video for resource ID: ${resource.id}, Error: ${error.message}`);
|
||||
throw new Error(`Failed to process video: ${error.message}`);
|
||||
}
|
||||
}
|
||||
private async generateVideoCover(filepath: string, outputDir: string): Promise<string> {
|
||||
this.logger.log(`Generating video cover for: ${filepath}`);
|
||||
const jpgCoverPath = path.join(outputDir, 'cover.jpg');
|
||||
const webpCoverPath = path.join(outputDir, 'cover.webp');
|
||||
return new Promise((resolve, reject) => {
|
||||
ffmpeg(filepath)
|
||||
.on('end', async () => {
|
||||
try {
|
||||
// 使用 Sharp 将 JPG 转换为 WebP
|
||||
await sharp(jpgCoverPath)
|
||||
.webp({ quality: 80 }) // 设置 WebP 压缩质量
|
||||
.toFile(webpCoverPath);
|
||||
|
||||
// 删除临时 JPG 文件
|
||||
await fs.unlink(jpgCoverPath);
|
||||
|
||||
this.logger.log(`Video cover generated at: ${webpCoverPath}`);
|
||||
resolve(path.basename(webpCoverPath));
|
||||
} catch (error: any) {
|
||||
this.logger.error(`Error converting cover to WebP: ${error.message}`);
|
||||
reject(error);
|
||||
}
|
||||
})
|
||||
.on('error', (err) => {
|
||||
this.logger.error(`Error generating video cover: ${err.message}`);
|
||||
reject(err);
|
||||
})
|
||||
.screenshots({
|
||||
count: 1,
|
||||
folder: outputDir,
|
||||
filename: 'cover.jpg',
|
||||
size: '640x360'
|
||||
});
|
||||
});
|
||||
}
|
||||
private async getVideoDuration(filepath: string): Promise<number> {
|
||||
this.logger.log(`Getting video duration for file: ${filepath}`);
|
||||
return new Promise((resolve, reject) => {
|
||||
ffmpeg.ffprobe(filepath, (err, metadata) => {
|
||||
if (err) {
|
||||
this.logger.error(`Error getting video duration: ${err.message}`);
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
const duration = metadata.format.duration || 0;
|
||||
this.logger.log(`Video duration: ${duration} seconds`);
|
||||
resolve(duration);
|
||||
});
|
||||
});
|
||||
}
|
||||
private async generateM3U8Stream(filepath: string, outputDir: string): Promise<string> {
|
||||
const m3u8Path = path.join(outputDir, 'index.m3u8');
|
||||
this.logger.log(`Generating M3U8 stream for video: ${filepath}, Output Dir: ${outputDir}`);
|
||||
return new Promise<string>((resolve, reject) => {
|
||||
ffmpeg(filepath)
|
||||
.outputOptions([
|
||||
// Improved video encoding settings
|
||||
'-c:v libx264',
|
||||
'-preset medium', // Balance between encoding speed and compression
|
||||
'-crf 23', // Constant Rate Factor for quality
|
||||
'-profile:v high', // Higher profile for better compression
|
||||
'-level:v 4.1', // Updated level for better compatibility
|
||||
// Parallel processing and performance
|
||||
'-threads 0', // Auto-detect optimal thread count
|
||||
'-x264-params keyint=48:min-keyint=48', // More precise GOP control
|
||||
// HLS specific optimizations
|
||||
'-hls_time 4', // Shorter segment duration for better adaptive streaming
|
||||
'-hls_list_size 0', // Keep all segments in playlist
|
||||
'-hls_flags independent_segments+delete_segments', // Allow segment cleanup
|
||||
// Additional encoding optimizations
|
||||
'-sc_threshold 0', // Disable scene change detection for more consistent segments
|
||||
'-max_muxing_queue_size 1024', // Increase muxing queue size
|
||||
// Output format
|
||||
'-f hls',
|
||||
])
|
||||
.output(m3u8Path)
|
||||
.on('start', (commandLine) => {
|
||||
this.logger.log(`Starting ffmpeg with command: ${commandLine}`);
|
||||
})
|
||||
.on('end', () => {
|
||||
this.logger.log(`Successfully generated M3U8 stream at: ${m3u8Path}`);
|
||||
resolve(m3u8Path);
|
||||
})
|
||||
.on('error', (err) => {
|
||||
const errorMessage = `Error generating M3U8 stream for ${filepath}: ${err.message}`;
|
||||
this.logger.error(errorMessage);
|
||||
reject(new Error(errorMessage));
|
||||
})
|
||||
.run();
|
||||
});
|
||||
}
|
||||
private async getVideoMetadata(filepath: string): Promise<Partial<VideoMetadata>> {
|
||||
this.logger.log(`Getting video metadata for file: ${filepath}`);
|
||||
return new Promise((resolve, reject) => {
|
||||
ffmpeg.ffprobe(filepath, (err, metadata) => {
|
||||
if (err) {
|
||||
this.logger.error(`Error getting video metadata: ${err.message}`);
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
const videoStream = metadata.streams.find(stream => stream.codec_type === 'video');
|
||||
const audioStream = metadata.streams.find(stream => stream.codec_type === 'audio');
|
||||
const videoMetadata: Partial<VideoMetadata> = {
|
||||
width: videoStream?.width || 0,
|
||||
height: videoStream?.height || 0,
|
||||
duration: metadata.format.duration || 0,
|
||||
videoCodec: videoStream?.codec_name || '',
|
||||
audioCodec: audioStream?.codec_name || ''
|
||||
};
|
||||
this.logger.log(`Extracted video metadata: ${JSON.stringify(videoMetadata)}`);
|
||||
resolve(videoMetadata);
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
|
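For orientation, the VideoProcessor writes its artifacts next to the uploaded file; with the date-based upload layout used later in tus.service.ts, the output would sit roughly here (paths are illustrative):

```ts
import path, { dirname } from "path";

const filepath = "/data/uploads/2025/01/08/Ab3xYz9QwE/demo.mp4"; // hypothetical upload path
const streamDir = path.join(dirname(filepath), "stream");        // createOutputDir(filepath, 'stream')
const m3u8Path = path.join(streamDir, "index.m3u8");             // HLS playlist from generateM3U8Stream
const coverPath = path.join(dirname(filepath), "cover.webp");    // cover written by generateVideoCover
```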
@ -23,14 +23,7 @@ export class ResourceService extends BaseService<Prisma.ResourceDelegate> {
|
|||
}
|
||||
return super.create(args);
|
||||
}
|
||||
async checkFileExists(hash: string): Promise<Resource | null> {
|
||||
return this.findFirst({
|
||||
where: {
|
||||
hash,
|
||||
deletedAt: null,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
async softDeleteByFileId(fileId: string) {
|
||||
return this.update({
|
||||
where: {
|
@@ -2,7 +2,7 @@ import { Resource } from "@nice/common";

export interface ResourceProcessor {
  process(resource: Resource): Promise<any>
}// processing result
}
export interface ProcessResult {
  success: boolean
  resource: Resource
@@ -22,7 +22,7 @@ export interface BaseMetadata {
export interface ImageMetadata {
  width: number;         // image width (px)
  height: number;        // image height (px)
  compressedUrl?:string;
  compressedUrl?: string;
  orientation?: number;  // EXIF orientation
  space?: string;        // color space (e.g. RGB, CMYK)
  hasAlpha?: boolean;    // whether the image has an alpha channel
@@ -32,13 +32,12 @@ export interface ImageMetadata {
 * Video-specific metadata interface
 */
export interface VideoMetadata {
  width: number;       // video width (px)
  height: number;      // video height (px)
  duration: number;    // duration (seconds)
  thumbnail?: string;  // cover image URL
  codec?: string;      // video codec
  frameRate?: number;  // frame rate (fps)
  bitrate?: number;    // bitrate (bps)
  width?: number;
  height?: number;
  duration?: number;
  videoCodec?: string;
  audioCodec?: string;
  coverUrl?: string
}

/**
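After processing, these fields are merged into the JSON already stored on resource.metadata, so a processed video record might look roughly like this (values and the mimeType field are illustrative, inferred from how the processors read FileMetadata):

```ts
const exampleVideoMetadata = {
  mimeType: "video/mp4",   // from the original FileMetadata
  width: 1920,
  height: 1080,
  duration: 93.4,          // seconds
  videoCodec: "h264",
  audioCodec: "aac",
  coverUrl: "cover.webp",  // basename returned by generateVideoCover
};
```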
@@ -0,0 +1,28 @@
import { InjectQueue } from "@nestjs/bullmq";
import { Injectable } from "@nestjs/common";
import EventBus from "@server/utils/event-bus";
import { Queue } from "bullmq";
import { ObjectType } from "@nice/common";
import { QueueJobType } from "../types";
@Injectable()
export class PostProcessService {
  constructor(
    @InjectQueue('general') private generalQueue: Queue
  ) {

  }

  private generateJobId(type: ObjectType, data: any): string {
    // Generate a unique job id from the object type and its related ids
    switch (type) {
      case ObjectType.ENROLLMENT:
        return `stats_${type}_${data.courseId}`;
      case ObjectType.LECTURE:
        return `stats_${type}_${data.courseId}_${data.sectionId}`;
      case ObjectType.POST:
        return `stats_${type}_${data.courseId}`;
      default:
        return `stats_${type}_${Date.now()}`;
    }
  }
}
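The deterministic ids from generateJobId let BullMQ de-duplicate repeated stats jobs: while a job with the same jobId is still queued, another add() with that id is ignored. A sketch (job name and payload shape are assumptions):

```ts
import { Queue } from "bullmq";

async function enqueueLectureStats(queue: Queue, courseId: string, sectionId: string) {
  const jobId = `stats_lecture_${courseId}_${sectionId}`; // mirrors generateJobId for ObjectType.LECTURE
  // BullMQ skips the add when a job with this jobId is already waiting or active.
  await queue.add("update-stats", { courseId, sectionId }, { jobId });
}
```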
@ -3,9 +3,11 @@ import { Logger } from '@nestjs/common';
|
|||
import { QueueJobType } from '../types';
|
||||
import { ResourceProcessingPipeline } from '@server/models/resource/pipe/resource.pipeline';
|
||||
import { ImageProcessor } from '@server/models/resource/processor/ImageProcessor';
|
||||
import { VideoProcessor } from '@server/models/resource/processor/VideoProcessor';
|
||||
const logger = new Logger('FileProcessorWorker');
|
||||
const pipeline = new ResourceProcessingPipeline()
|
||||
.addProcessor(new ImageProcessor())
|
||||
.addProcessor(new VideoProcessor())
|
||||
export default async function processJob(job: Job<any, any, QueueJobType>) {
|
||||
if (job.name === QueueJobType.FILE_PROCESS) {
|
||||
console.log(job)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import { Injectable, OnModuleInit, Logger } from '@nestjs/common';
|
||||
import { Server, Upload } from "@nice/tus"
|
||||
import { FileStore } from '@tus/file-store';
|
||||
import { Server, Uid, Upload } from "@nice/tus"
|
||||
import { FileStore } from '@nice/tus';
|
||||
import { Request, Response } from "express"
|
||||
import { db, ResourceStatus } from '@nice/common';
|
||||
import { getFilenameWithoutExt } from '@server/utils/file';
|
||||
|
@ -9,30 +9,36 @@ import { Cron, CronExpression } from '@nestjs/schedule';
|
|||
import { InjectQueue } from '@nestjs/bullmq';
|
||||
import { Queue } from 'bullmq';
|
||||
import { QueueJobType } from '@server/queue/types';
|
||||
|
||||
// Centralized configuration for file storage
|
||||
import { nanoid } from 'nanoid-cjs';
|
||||
import { slugify } from 'transliteration';
|
||||
import path from 'path';
|
||||
const FILE_UPLOAD_CONFIG = {
|
||||
directory: "./uploads",
|
||||
directory: process.env.UPLOAD_DIR,
|
||||
maxSizeBytes: 20_000_000_000, // 20GB
|
||||
expirationPeriod: 24 * 60 * 60 * 1000 // 24 hours
|
||||
};
|
||||
|
||||
@Injectable()
|
||||
export class TusService implements OnModuleInit {
|
||||
private readonly logger = new Logger(TusService.name);
|
||||
private tusServer: Server;
|
||||
|
||||
constructor(private readonly resourceService: ResourceService,
|
||||
@InjectQueue("file-queue") private fileQueue: Queue
|
||||
) { }
|
||||
|
||||
onModuleInit() {
|
||||
this.initializeTusServer();
|
||||
this.setupTusEventHandlers();
|
||||
}
|
||||
|
||||
private initializeTusServer() {
|
||||
this.tusServer = new Server({
|
||||
namingFunction(req, metadata) {
|
||||
const safeFilename = slugify(metadata.filename);
|
||||
const now = new Date();
|
||||
const year = now.getFullYear();
|
||||
const month = String(now.getMonth() + 1).padStart(2, '0');
|
||||
const day = String(now.getDate()).padStart(2, '0');
|
||||
const uniqueId = nanoid(10);
|
||||
return `${year}/${month}/${day}/${uniqueId}/${safeFilename}`;
|
||||
},
|
||||
path: '/upload',
|
||||
datastore: new FileStore({
|
||||
directory: FILE_UPLOAD_CONFIG.directory,
|
||||
|
@ -40,7 +46,10 @@ export class TusService implements OnModuleInit {
|
|||
}),
|
||||
maxSize: FILE_UPLOAD_CONFIG.maxSizeBytes,
|
||||
postReceiveInterval: 1000,
|
||||
getFileIdFromRequest: (_, lastPath) => lastPath
|
||||
getFileIdFromRequest: (req, lastPath) => {
|
||||
const match = req.url.match(/\/upload\/(.+)/);
|
||||
return match ? match[1] : lastPath;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
@@ -48,15 +57,20 @@ export class TusService implements OnModuleInit {
    this.tusServer.on("POST_CREATE", this.handleUploadCreate.bind(this));
    this.tusServer.on("POST_FINISH", this.handleUploadFinish.bind(this));
  }

  private getFileId(uploadId: string) {
    return uploadId.replace(/\/[^/]+$/, '')
  }
  private async handleUploadCreate(req: Request, res: Response, upload: Upload, url: string) {
    try {

      const fileId = this.getFileId(upload.id)
      const filename = upload.metadata.filename
      await this.resourceService.create({
        data: {
          title: getFilenameWithoutExt(upload.metadata.filename),
          filename: upload.metadata.filename,
          fileId: upload.id,
          url,
          filename,
          fileId, // drop the trailing filename segment
          url: upload.id,
          metadata: upload.metadata,
          status: ResourceStatus.UPLOADING
        }
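Taken together with the namingFunction above, the fields written here relate like this (the concrete upload id is illustrative):

```ts
const uploadId = "2025/01/08/Ab3xYz9QwE/annual-report.pdf"; // produced by namingFunction
const fileId = uploadId.replace(/\/[^/]+$/, "");            // "2025/01/08/Ab3xYz9QwE" (getFileId)
const url = uploadId;                                       // stored as resource.url
// extractFileIdFromNginxUrl("/uploads/" + uploadId) also yields fileId,
// which is how AuthService.validateFileRequest finds the resource later.
```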
@ -69,11 +83,11 @@ export class TusService implements OnModuleInit {
|
|||
private async handleUploadFinish(req: Request, res: Response, upload: Upload) {
|
||||
try {
|
||||
const resource = await this.resourceService.update({
|
||||
where: { fileId: upload.id },
|
||||
where: { fileId: this.getFileId(upload.id) },
|
||||
data: { status: ResourceStatus.UPLOADED }
|
||||
});
|
||||
this.fileQueue.add(QueueJobType.FILE_PROCESS, { resource }, { jobId: resource.id })
|
||||
this.logger.log('Upload finished', { resourceId: resource.id });
|
||||
this.logger.log(`Upload finished ${resource.url}`);
|
||||
} catch (error) {
|
||||
this.logger.error('Failed to update resource after upload', error);
|
||||
}
|
||||
|
@ -99,6 +113,7 @@ export class TusService implements OnModuleInit {
|
|||
}
|
||||
|
||||
async handleTus(req: Request, res: Response) {
|
||||
|
||||
return this.tusServer.handle(req, res);
|
||||
}
|
||||
}
|
|
@ -8,6 +8,8 @@ import {
|
|||
Patch,
|
||||
Param,
|
||||
Delete,
|
||||
Head,
|
||||
Options,
|
||||
} from '@nestjs/common';
|
||||
import { Request, Response } from "express"
|
||||
import { TusService } from './tus.service';
|
||||
|
@ -15,97 +17,39 @@ import { TusService } from './tus.service';
|
|||
@Controller('upload')
|
||||
export class UploadController {
|
||||
constructor(private readonly tusService: TusService) { }
|
||||
// @Post()
|
||||
// async handlePost(@Req() req: Request, @Res() res: Response) {
|
||||
// return this.tusService.handleTus(req, res);
|
||||
// }
|
||||
|
||||
|
||||
@Options()
|
||||
async handleOptions(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
|
||||
@Head()
|
||||
async handleHead(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
|
||||
@Post()
|
||||
async handlePost(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
@Patch(':fileId') // 添加文件ID参数
|
||||
async handlePatch(
|
||||
@Req() req: Request,
|
||||
@Res() res: Response,
|
||||
@Param('fileId') fileId: string // 添加文件ID参数
|
||||
) {
|
||||
try {
|
||||
// 添加错误处理和日志
|
||||
const result = await this.tusService.handleTus(req, res);
|
||||
return result;
|
||||
} catch (error: any) {
|
||||
console.error('Upload PATCH error:', error);
|
||||
res.status(500).json({
|
||||
message: 'Upload failed',
|
||||
error: error.message
|
||||
});
|
||||
}
|
||||
}
|
||||
@Delete(':fileId')
|
||||
async handleDelete(
|
||||
@Req() req: Request,
|
||||
@Res() res: Response,
|
||||
@Param('fileId') fileId: string
|
||||
) {
|
||||
try {
|
||||
const result = await this.tusService.handleTus(req, res);
|
||||
return result;
|
||||
} catch (error: any) {
|
||||
console.error('Upload DELETE error:', error);
|
||||
res.status(500).json({
|
||||
message: 'Delete failed',
|
||||
error: error.message
|
||||
});
|
||||
}
|
||||
@Get("/*")
|
||||
async handleGet(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
|
||||
@Get(':fileId')
|
||||
async handleGet(
|
||||
@Req() req: Request,
|
||||
@Res() res: Response,
|
||||
@Param('fileId') fileId: string
|
||||
) {
|
||||
try {
|
||||
const result = await this.tusService.handleTus(req, res);
|
||||
return result;
|
||||
} catch (error: any) {
|
||||
console.error('Upload GET error:', error);
|
||||
res.status(500).json({
|
||||
message: 'Retrieve failed',
|
||||
error: error.message
|
||||
});
|
||||
}
|
||||
@Patch("/*")
|
||||
async handlePatch(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
// @Post('chunk')
|
||||
// @UseInterceptors(FileInterceptor('file'))
|
||||
// async uploadChunk(
|
||||
// @Body('chunk') chunkString: string, // 改为接收字符串
|
||||
// @UploadedFile() file: Express.Multer.File,
|
||||
// @Body('clientId') clientId: string
|
||||
// ) {
|
||||
// const chunk = JSON.parse(chunkString); // 解析字符串为对象
|
||||
// await this.uploadService.uploadChunk(chunk, file, clientId);
|
||||
// return { message: 'Chunk uploaded successfully' };
|
||||
// }
|
||||
// @Get('status/:identifier')
|
||||
// checkUploadStatusInfo(@Param('identifier') identifier: string) {
|
||||
// const status = this.uploadService.checkUploadStatusInfo(identifier);
|
||||
// return status || { message: 'No upload status found' };
|
||||
// }
|
||||
// @Post('pause/:identifier')
|
||||
// pauseUpload(
|
||||
// @Param('identifier') identifier: string,
|
||||
// @Body('clientId') clientId: string
|
||||
// ) {
|
||||
// this.uploadService.pauseUpload(identifier, clientId);
|
||||
// return { message: 'Upload paused successfully' };
|
||||
// }
|
||||
|
||||
// @Post('resume/:identifier')
|
||||
// async resumeUpload(
|
||||
// @Param('identifier') identifier: string,
|
||||
// @Body('clientId') clientId: string
|
||||
// ) {
|
||||
// const resumed = this.uploadService.resumeUpload(identifier, clientId);
|
||||
// if (!resumed) {
|
||||
// throw new Error('Unable to resume upload');
|
||||
// }
|
||||
// return { message: 'Upload resumed successfully' };
|
||||
// }
|
||||
// Keeping the catch-all method as a fallback
|
||||
@All()
|
||||
async handleUpload(@Req() req: Request, @Res() res: Response) {
|
||||
return this.tusService.handleTus(req, res);
|
||||
}
|
||||
}
|
|
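From the client side, these routes match what a standard tus client expects (POST to create, HEAD to resume, PATCH for chunks). A sketch assuming the tus-js-client package, which is not part of this commit:

```ts
import * as tus from "tus-js-client";

function startUpload(file: File) {
  const upload = new tus.Upload(file, {
    endpoint: "/upload",                                  // served by the controller above
    metadata: { filename: file.name, filetype: file.type },
    onError: (err) => console.error("upload failed", err),
    onSuccess: () => console.log("upload finished"),
  });
  upload.start();
}
```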
@@ -0,0 +1,4 @@
export function extractFileIdFromNginxUrl(url: string) {
  const match = url.match(/uploads\/(\d{4}\/\d{2}\/\d{2}\/[^/]+)/);
  return match ? match[1] : '';
}
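For example, applied to the URI nginx forwards in X-Original-URI:

```ts
extractFileIdFromNginxUrl("/uploads/2025/01/08/Ab3xYz9QwE/annual-report.pdf");
// -> "2025/01/08/Ab3xYz9QwE"
extractFileIdFromNginxUrl("/uploads/legacy-file.pdf");
// -> "" (no yyyy/mm/dd/<id> segment, so no match)
```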
@@ -7,10 +7,6 @@ dotenv.config();
export function getFilenameWithoutExt(filename: string) {
  return filename ? filename.replace(/\.[^/.]+$/, '') : filename;
}
export function extractFilePathFromUri(uri: string): string {
  // Extract the file path from a /uploads/ URI
  return uri.replace('/uploads/', '');
}
/**
 * Compute the SHA-256 hash of a file
 * @param filePath path to the file
@ -1,20 +1,32 @@
|
|||
server {
|
||||
# 监听80端口
|
||||
listen 80;
|
||||
server_name 192.168.12.77;
|
||||
# 服务器域名/IP地址,使用环境变量
|
||||
server_name host.docker.internal;
|
||||
|
||||
# 基础优化配置
|
||||
# 基础性能优化配置
|
||||
# 启用tcp_nopush以优化数据发送
|
||||
tcp_nopush on;
|
||||
# 启用tcp_nodelay减少网络延迟
|
||||
tcp_nodelay on;
|
||||
# 设置哈希表最大大小
|
||||
types_hash_max_size 2048;
|
||||
|
||||
# Gzip 压缩配置
|
||||
# Gzip压缩配置,提高传输效率
|
||||
gzip on;
|
||||
# 对IE6禁用Gzip
|
||||
gzip_disable "msie6";
|
||||
# 启用Vary头,支持缓存变体
|
||||
gzip_vary on;
|
||||
# 对所有代理请求启用压缩
|
||||
gzip_proxied any;
|
||||
# 压缩级别(1-9),6为推荐值
|
||||
gzip_comp_level 6;
|
||||
# 设置压缩缓冲区
|
||||
gzip_buffers 16 8k;
|
||||
# 压缩HTTP版本
|
||||
gzip_http_version 1.1;
|
||||
# 压缩的文件类型
|
||||
gzip_types
|
||||
text/plain
|
||||
text/css
|
||||
|
@ -25,47 +37,57 @@ server {
|
|||
application/xml+rss
|
||||
text/javascript;
|
||||
|
||||
# 默认首页配置
|
||||
# 默认站点位置配置
|
||||
location / {
|
||||
# 网站根目录
|
||||
root /usr/share/nginx/html;
|
||||
# 默认首页文件
|
||||
index index.html index.htm;
|
||||
|
||||
# 文件缓存配置
|
||||
# 文件缓存优化
|
||||
# 最大缓存1000个文件,非活跃文件20秒后失效
|
||||
open_file_cache max=1000 inactive=20s;
|
||||
# 缓存验证时间
|
||||
open_file_cache_valid 30s;
|
||||
# 至少被访问2次的文件才缓存
|
||||
open_file_cache_min_uses 2;
|
||||
# 缓存文件错误信息
|
||||
open_file_cache_errors on;
|
||||
|
||||
# 尝试查找文件,不存在则重定向到index.html(适用于单页应用)
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
# 文件上传处理配置
|
||||
# 文件上传处理位置
|
||||
location /uploads/ {
|
||||
# 文件实际存储路径
|
||||
alias /data/uploads/;
|
||||
|
||||
# 文件传输优化
|
||||
# 文件传输性能优化
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
# 异步IO
|
||||
aio on;
|
||||
# 直接IO,提高大文件传输效率
|
||||
directio 512;
|
||||
|
||||
# 认证配置
|
||||
# 文件访问认证
|
||||
# 通过内部认证服务验证
|
||||
auth_request /auth-file;
|
||||
# 存储认证状态和用户信息
|
||||
auth_request_set $auth_status $upstream_status;
|
||||
auth_request_set $auth_user_id $upstream_http_x_user_id;
|
||||
auth_request_set $auth_resource_type $upstream_http_x_resource_type;
|
||||
|
||||
# 缓存控制
|
||||
# 不缓存
|
||||
expires 0;
|
||||
# 私有缓存,禁止转换
|
||||
add_header Cache-Control "private, no-transform";
|
||||
# 添加用户和资源类型头
|
||||
add_header X-User-Id $auth_user_id;
|
||||
add_header X-Resource-Type $auth_resource_type;
|
||||
|
||||
# 带宽控制
|
||||
# 超过100MB后限制速率为102400KB/s
|
||||
limit_rate 102400k;
|
||||
limit_rate_after 100m;
|
||||
|
||||
# CORS 配置
|
||||
# 跨域资源共享(CORS)配置
|
||||
add_header 'Access-Control-Allow-Origin' '$http_origin' always;
|
||||
add_header 'Access-Control-Allow-Credentials' 'true' always;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always;
|
||||
|
@ -73,17 +95,16 @@ server {
|
|||
'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization'
|
||||
always;
|
||||
}
|
||||
|
||||
# 认证服务配置
|
||||
# 内部认证服务位置
|
||||
location = /auth-file {
|
||||
# 仅供内部使用
|
||||
internal;
|
||||
proxy_pass http://192.168.12.77:3000/auth/file;
|
||||
|
||||
# 请求优化
|
||||
# 代理到认证服务
|
||||
proxy_pass http://host.docker.internal:3000/auth/file;
|
||||
# 请求优化:不传递请求体
|
||||
proxy_pass_request_body off;
|
||||
proxy_set_header Content-Length "";
|
||||
|
||||
# 请求信息传递
|
||||
# 传递原始请求信息
|
||||
proxy_set_header X-Original-URI $request_uri;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Original-Method $request_method;
|
||||
|
|
|
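The /auth-file location above only works because the NestJS side answers the subrequest with plain status codes and the X-User-Id / X-Resource-Type headers that nginx copies onto the file response. A rough controller-side sketch (handler body is a stand-in; the real AuthController builds a FileRequest from X-Original-URI, host and authorization and calls authService.validateFileRequest):

```ts
import { Controller, Get, Req, Res } from "@nestjs/common";
import { Request, Response } from "express";

@Controller("auth")
class FileAuthSketchController {
  @Get("file")
  async authFile(@Req() req: Request, @Res() res: Response) {
    const originalUri = req.headers["x-original-uri"] as string; // set by proxy_set_header above
    // Stand-in for authService.validateFileRequest({ originalUri, ... }):
    const authResult = { isValid: Boolean(originalUri), userId: "u_123", resourceType: "video" };
    if (!authResult.isValid) return res.status(403).send();
    res.setHeader("X-User-Id", authResult.userId);
    res.setHeader("X-Resource-Type", authResult.resourceType);
    return res.status(200).send();
  }
}
```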
@ -1,20 +1,32 @@
|
|||
server {
|
||||
# 监听80端口
|
||||
listen 80;
|
||||
# 服务器域名/IP地址,使用环境变量
|
||||
server_name ${SERVER_IP};
|
||||
|
||||
# 基础优化配置
|
||||
# 基础性能优化配置
|
||||
# 启用tcp_nopush以优化数据发送
|
||||
tcp_nopush on;
|
||||
# 启用tcp_nodelay减少网络延迟
|
||||
tcp_nodelay on;
|
||||
# 设置哈希表最大大小
|
||||
types_hash_max_size 2048;
|
||||
|
||||
# Gzip 压缩配置
|
||||
# Gzip压缩配置,提高传输效率
|
||||
gzip on;
|
||||
# 对IE6禁用Gzip
|
||||
gzip_disable "msie6";
|
||||
# 启用Vary头,支持缓存变体
|
||||
gzip_vary on;
|
||||
# 对所有代理请求启用压缩
|
||||
gzip_proxied any;
|
||||
# 压缩级别(1-9),6为推荐值
|
||||
gzip_comp_level 6;
|
||||
# 设置压缩缓冲区
|
||||
gzip_buffers 16 8k;
|
||||
# 压缩HTTP版本
|
||||
gzip_http_version 1.1;
|
||||
# 压缩的文件类型
|
||||
gzip_types
|
||||
text/plain
|
||||
text/css
|
||||
|
@ -25,47 +37,57 @@ server {
|
|||
application/xml+rss
|
||||
text/javascript;
|
||||
|
||||
# 默认首页配置
|
||||
# 默认站点位置配置
|
||||
location / {
|
||||
# 网站根目录
|
||||
root /usr/share/nginx/html;
|
||||
# 默认首页文件
|
||||
index index.html index.htm;
|
||||
|
||||
# 文件缓存配置
|
||||
# 文件缓存优化
|
||||
# 最大缓存1000个文件,非活跃文件20秒后失效
|
||||
open_file_cache max=1000 inactive=20s;
|
||||
# 缓存验证时间
|
||||
open_file_cache_valid 30s;
|
||||
# 至少被访问2次的文件才缓存
|
||||
open_file_cache_min_uses 2;
|
||||
# 缓存文件错误信息
|
||||
open_file_cache_errors on;
|
||||
|
||||
# 尝试查找文件,不存在则重定向到index.html(适用于单页应用)
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
# 文件上传处理配置
|
||||
# 文件上传处理位置
|
||||
location /uploads/ {
|
||||
# 文件实际存储路径
|
||||
alias /data/uploads/;
|
||||
|
||||
# 文件传输优化
|
||||
# 文件传输性能优化
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
# 异步IO
|
||||
aio on;
|
||||
# 直接IO,提高大文件传输效率
|
||||
directio 512;
|
||||
|
||||
# 认证配置
|
||||
# 文件访问认证
|
||||
# 通过内部认证服务验证
|
||||
auth_request /auth-file;
|
||||
# 存储认证状态和用户信息
|
||||
auth_request_set $auth_status $upstream_status;
|
||||
auth_request_set $auth_user_id $upstream_http_x_user_id;
|
||||
auth_request_set $auth_resource_type $upstream_http_x_resource_type;
|
||||
|
||||
# 缓存控制
|
||||
# 不缓存
|
||||
expires 0;
|
||||
# 私有缓存,禁止转换
|
||||
add_header Cache-Control "private, no-transform";
|
||||
# 添加用户和资源类型头
|
||||
add_header X-User-Id $auth_user_id;
|
||||
add_header X-Resource-Type $auth_resource_type;
|
||||
|
||||
# 带宽控制
|
||||
# 超过100MB后限制速率为102400KB/s
|
||||
limit_rate 102400k;
|
||||
limit_rate_after 100m;
|
||||
|
||||
# CORS 配置
|
||||
# 跨域资源共享(CORS)配置
|
||||
add_header 'Access-Control-Allow-Origin' '$http_origin' always;
|
||||
add_header 'Access-Control-Allow-Credentials' 'true' always;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always;
|
||||
|
@ -73,17 +95,16 @@ server {
|
|||
'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization'
|
||||
always;
|
||||
}
|
||||
|
||||
# 认证服务配置
|
||||
# 内部认证服务位置
|
||||
location = /auth-file {
|
||||
# 仅供内部使用
|
||||
internal;
|
||||
# 代理到认证服务
|
||||
proxy_pass http://${SERVER_IP}:3000/auth/file;
|
||||
|
||||
# 请求优化
|
||||
# 请求优化:不传递请求体
|
||||
proxy_pass_request_body off;
|
||||
proxy_set_header Content-Length "";
|
||||
|
||||
# 请求信息传递
|
||||
# 传递原始请求信息
|
||||
proxy_set_header X-Original-URI $request_uri;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Original-Method $request_method;
|
@ -5,16 +5,6 @@ generator client {
|
|||
provider = "prisma-client-js"
|
||||
}
|
||||
|
||||
// generator zod {
|
||||
// provider = "zod-prisma-types"
|
||||
// output = "../src/generated" // (default) the directory where generated zod schemas will be saved
|
||||
// createModelTypes = true
|
||||
// createRelationValuesTypes = true
|
||||
// writeNullishInModelTypes = true
|
||||
// createPartialTypes = false
|
||||
// useMultipleFiles = false
|
||||
// useTypeAssertions = true
|
||||
// }
|
||||
|
||||
datasource db {
|
||||
provider = "postgresql"
|
||||
|
@ -442,21 +432,18 @@ model Resource {
|
|||
metadata Json? @map("metadata")
|
||||
// 处理状态控制
|
||||
status String?
|
||||
// 审计字段
|
||||
createdAt DateTime? @default(now()) @map("created_at")
|
||||
updatedAt DateTime? @updatedAt @map("updated_at")
|
||||
createdBy String? @map("created_by")
|
||||
updatedBy String? @map("updated_by")
|
||||
deletedAt DateTime? @map("deleted_at")
|
||||
isPublic Boolean? @default(true) @map("is_public")
|
||||
|
||||
owner Staff? @relation(fields: [ownerId], references: [id])
|
||||
ownerId String? @map("owner_id")
|
||||
post Post? @relation(fields: [postId], references: [id])
|
||||
postId String? @map("post_id")
|
||||
lecture Lecture? @relation(fields: [lectureId], references: [id])
|
||||
lectureId String? @map("lecture_id")
|
||||
|
||||
// 索引
|
||||
@@index([type])
|
||||
@@index([createdAt])
|
||||
|
|
|
@ -12,17 +12,24 @@
|
|||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@aws-sdk/client-s3": "^3.723.0",
|
||||
"@shopify/semaphore": "^3.1.0",
|
||||
"debug": "^4.4.0",
|
||||
"lodash.throttle": "^4.1.1"
|
||||
"lodash.throttle": "^4.1.1",
|
||||
"multistream": "^4.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/debug": "^4.1.12",
|
||||
"@types/lodash.throttle": "^4.1.9",
|
||||
"@types/multistream": "^4.1.3",
|
||||
"@types/node": "^20.3.1",
|
||||
"concurrently": "^8.0.0",
|
||||
"ioredis": "^5.4.1",
|
||||
"rimraf": "^6.0.1",
|
||||
"should": "^13.2.3",
|
||||
"ts-node": "^10.9.1",
|
||||
"tsup": "^8.3.5",
|
||||
"typescript": "^5.5.4"
|
||||
"typescript": "^5.5.4",
|
||||
"@redis/client": "^1.6.0"
|
||||
}
|
||||
}
|
|
@ -62,6 +62,7 @@ export class PatchHandler extends BaseHandler {
|
|||
try {
|
||||
// 从请求中获取文件ID
|
||||
const id = this.getFileIdFromRequest(req)
|
||||
console.log('id', id)
|
||||
if (!id) {
|
||||
throw ERRORS.FILE_NOT_FOUND
|
||||
}
|
||||
|
|
|
@@ -2,3 +2,4 @@ export { Server } from './server'
export * from './types'
export * from './lockers'
export * from './utils'
export * from "./store"
@ -12,6 +12,7 @@ import type stream from 'node:stream'
|
|||
import type { ServerOptions, RouteHandler, WithOptional } from './types'
|
||||
import { MemoryLocker } from './lockers'
|
||||
import { EVENTS, Upload, DataStore, REQUEST_METHODS, ERRORS, TUS_RESUMABLE, EXPOSED_HEADERS, CancellationContext } from './utils'
|
||||
import { message } from 'antd';
|
||||
|
||||
/**
|
||||
* 处理器类型映射
|
||||
|
@ -225,6 +226,7 @@ export class Server extends EventEmitter {
|
|||
res: http.ServerResponse
|
||||
// biome-ignore lint/suspicious/noConfusingVoidType: it's fine
|
||||
): Promise<http.ServerResponse | stream.Writable | void> {
|
||||
|
||||
const context = this.createContext(req)
|
||||
log(`[TusServer] handle: ${req.method} ${req.url}`)
|
||||
// 允许覆盖 HTTP 方法。这样做的原因是某些库/环境不支持 PATCH 和 DELETE 请求,例如浏览器中的 Flash 和 Java 部分环境
|
||||
|
@ -236,6 +238,7 @@ export class Server extends EventEmitter {
|
|||
body?: string
|
||||
message: string
|
||||
}) => {
|
||||
|
||||
let status_code = error.status_code || ERRORS.UNKNOWN_ERROR.status_code
|
||||
let body = error.body || `${ERRORS.UNKNOWN_ERROR.body}${error.message || ''}\n`
|
||||
if (this.options.onResponseError) {
|
||||
|
@ -251,6 +254,7 @@ export class Server extends EventEmitter {
|
|||
const handler = this.handlers.GET
|
||||
return handler.send(req, res).catch(onError)
|
||||
}
|
||||
|
||||
// Tus-Resumable 头部必须包含在每个请求和响应中,除了 OPTIONS 请求。其值必须是客户端或服务器使用的协议版本。
|
||||
res.setHeader('Tus-Resumable', TUS_RESUMABLE)
|
||||
if (req.method !== 'OPTIONS' && req.headers['tus-resumable'] === undefined) {
|
||||
|
@ -283,9 +287,11 @@ export class Server extends EventEmitter {
|
|||
if (this.options.allowedCredentials === true) {
|
||||
res.setHeader('Access-Control-Allow-Credentials', 'true')
|
||||
}
|
||||
|
||||
// 调用请求方法的处理器
|
||||
const handler = this.handlers[req.method as keyof Handlers]
|
||||
if (handler) {
|
||||
|
||||
return handler.send(req, res, context).catch(onError)
|
||||
}
|
||||
@ -0,0 +1,230 @@
|
|||
// TODO: use /promises versions
|
||||
import fs from 'node:fs'
|
||||
import fsProm from 'node:fs/promises'
|
||||
import path from 'node:path'
|
||||
import stream from 'node:stream'
|
||||
import type http from 'node:http'
|
||||
|
||||
import debug from 'debug'
|
||||
import { DataStore, Upload, ERRORS } from '../../utils'
|
||||
import {
|
||||
FileKvStore as FileConfigstore,
|
||||
MemoryKvStore as MemoryConfigstore,
|
||||
RedisKvStore as RedisConfigstore,
|
||||
KvStore as Configstore,
|
||||
} from '../../utils'
|
||||
|
||||
type Options = {
|
||||
directory: string
|
||||
configstore?: Configstore
|
||||
expirationPeriodInMilliseconds?: number
|
||||
}
|
||||
|
||||
const MASK = '0777'
|
||||
const IGNORED_MKDIR_ERROR = 'EEXIST'
|
||||
const FILE_DOESNT_EXIST = 'ENOENT'
|
||||
const log = debug('tus-node-server:stores:filestore')
|
||||
|
||||
export class FileStore extends DataStore {
|
||||
directory: string
|
||||
configstore: Configstore
|
||||
expirationPeriodInMilliseconds: number
|
||||
|
||||
constructor({ directory, configstore, expirationPeriodInMilliseconds }: Options) {
|
||||
super()
|
||||
this.directory = directory
|
||||
this.configstore = configstore ?? new FileConfigstore(directory)
|
||||
this.expirationPeriodInMilliseconds = expirationPeriodInMilliseconds ?? 0
|
||||
this.extensions = [
|
||||
'creation',
|
||||
'creation-with-upload',
|
||||
'creation-defer-length',
|
||||
'termination',
|
||||
'expiration',
|
||||
]
|
||||
// TODO: this async call can not happen in the constructor
|
||||
this.checkOrCreateDirectory()
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure the directory exists.
|
||||
*/
|
||||
private checkOrCreateDirectory() {
|
||||
fs.mkdir(this.directory, { mode: MASK, recursive: true }, (error) => {
|
||||
if (error && error.code !== IGNORED_MKDIR_ERROR) {
|
||||
throw error
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an empty file.
|
||||
*/
|
||||
async create(file: Upload): Promise<Upload> {
|
||||
const dirs = file.id.split('/').slice(0, -1)
|
||||
const filePath = path.join(this.directory, file.id)
|
||||
|
||||
await fsProm.mkdir(path.join(this.directory, ...dirs), { recursive: true })
|
||||
await fsProm.writeFile(filePath, '')
|
||||
await this.configstore.set(file.id, file)
|
||||
|
||||
file.storage = { type: 'file', path: filePath }
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
read(file_id: string) {
|
||||
return fs.createReadStream(path.join(this.directory, file_id))
|
||||
}
|
||||
|
||||
remove(file_id: string): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
fs.unlink(`${this.directory}/${file_id}`, (err) => {
|
||||
if (err) {
|
||||
log('[FileStore] delete: Error', err)
|
||||
reject(ERRORS.FILE_NOT_FOUND)
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
resolve(this.configstore.delete(file_id))
|
||||
} catch (error) {
|
||||
reject(error)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
write(
|
||||
readable: http.IncomingMessage | stream.Readable,
|
||||
file_id: string,
|
||||
offset: number
|
||||
): Promise<number> {
|
||||
const file_path = path.join(this.directory, file_id)
|
||||
const writeable = fs.createWriteStream(file_path, {
|
||||
flags: 'r+',
|
||||
start: offset,
|
||||
})
|
||||
|
||||
let bytes_received = 0
|
||||
const transform = new stream.Transform({
|
||||
transform(chunk, _, callback) {
|
||||
bytes_received += chunk.length
|
||||
callback(null, chunk)
|
||||
},
|
||||
})
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
stream.pipeline(readable, transform, writeable, (err) => {
|
||||
if (err) {
|
||||
log('[FileStore] write: Error', err)
|
||||
return reject(ERRORS.FILE_WRITE_ERROR)
|
||||
}
|
||||
|
||||
log(`[FileStore] write: ${bytes_received} bytes written to ${file_path}`)
|
||||
offset += bytes_received
|
||||
log(`[FileStore] write: File is now ${offset} bytes`)
|
||||
|
||||
return resolve(offset)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
async getUpload(id: string): Promise<Upload> {
|
||||
const file = await this.configstore.get(id)
|
||||
|
||||
if (!file) {
|
||||
throw ERRORS.FILE_NOT_FOUND
|
||||
}
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const file_path = `${this.directory}/${id}`
|
||||
fs.stat(file_path, (error, stats) => {
|
||||
if (error && error.code === FILE_DOESNT_EXIST && file) {
|
||||
log(
|
||||
`[FileStore] getUpload: No file found at ${file_path} but db record exists`,
|
||||
file
|
||||
)
|
||||
return reject(ERRORS.FILE_NO_LONGER_EXISTS)
|
||||
}
|
||||
|
||||
if (error && error.code === FILE_DOESNT_EXIST) {
|
||||
log(`[FileStore] getUpload: No file found at ${file_path}`)
|
||||
return reject(ERRORS.FILE_NOT_FOUND)
|
||||
}
|
||||
|
||||
if (error) {
|
||||
return reject(error)
|
||||
}
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
log(`[FileStore] getUpload: ${file_path} is a directory`)
|
||||
return reject(ERRORS.FILE_NOT_FOUND)
|
||||
}
|
||||
|
||||
return resolve(
|
||||
new Upload({
|
||||
id,
|
||||
size: file.size,
|
||||
offset: stats.size,
|
||||
metadata: file.metadata,
|
||||
creation_date: file.creation_date,
|
||||
storage: { type: 'file', path: file_path },
|
||||
})
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
async declareUploadLength(id: string, upload_length: number) {
|
||||
const file = await this.configstore.get(id)
|
||||
|
||||
if (!file) {
|
||||
throw ERRORS.FILE_NOT_FOUND
|
||||
}
|
||||
|
||||
file.size = upload_length
|
||||
|
||||
await this.configstore.set(id, file)
|
||||
}
|
||||
|
||||
async deleteExpired(): Promise<number> {
|
||||
const now = new Date()
|
||||
const toDelete: Promise<void>[] = []
|
||||
|
||||
if (!this.configstore.list) {
|
||||
throw ERRORS.UNSUPPORTED_EXPIRATION_EXTENSION
|
||||
}
|
||||
|
||||
const uploadKeys = await this.configstore.list()
|
||||
for (const file_id of uploadKeys) {
|
||||
try {
|
||||
const info = await this.configstore.get(file_id)
|
||||
if (
|
||||
info &&
|
||||
'creation_date' in info &&
|
||||
this.getExpiration() > 0 &&
|
||||
info.size !== info.offset &&
|
||||
info.creation_date
|
||||
) {
|
||||
const creation = new Date(info.creation_date)
|
||||
const expires = new Date(creation.getTime() + this.getExpiration())
|
||||
if (now > expires) {
|
||||
toDelete.push(this.remove(file_id))
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
if (error !== ERRORS.FILE_NO_LONGER_EXISTS) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(toDelete)
|
||||
return toDelete.length
|
||||
}
|
||||
|
||||
getExpiration(): number {
|
||||
return this.expirationPeriodInMilliseconds
|
||||
}
|
||||
}
|
|
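Wiring this store into the tus Server mirrors what tus.service.ts above already does; condensed (the directory value is illustrative):

```ts
import { Server, FileStore } from "@nice/tus";

const tusServer = new Server({
  path: "/upload",
  datastore: new FileStore({ directory: "./uploads" }),
  maxSize: 20_000_000_000, // 20GB, as in FILE_UPLOAD_CONFIG
});
// requests are then forwarded with tusServer.handle(req, res)
```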
@ -0,0 +1,2 @@
|
|||
export * from "./file-store"
|
||||
export * from "./s3-store"
|
|
@ -0,0 +1,803 @@
|
|||
import os from 'node:os'
|
||||
import fs, { promises as fsProm } from 'node:fs'
|
||||
import stream, { promises as streamProm } from 'node:stream'
|
||||
import type { Readable } from 'node:stream'
|
||||
|
||||
import type AWS from '@aws-sdk/client-s3'
|
||||
import { NoSuchKey, NotFound, S3, type S3ClientConfig } from '@aws-sdk/client-s3'
|
||||
import debug from 'debug'
|
||||
|
||||
import {
|
||||
DataStore,
|
||||
StreamSplitter,
|
||||
Upload,
|
||||
ERRORS,
|
||||
TUS_RESUMABLE,
|
||||
type KvStore,
|
||||
MemoryKvStore,
|
||||
} from '../../utils'
|
||||
|
||||
import { Semaphore, type Permit } from '@shopify/semaphore'
|
||||
import MultiStream from 'multistream'
|
||||
import crypto from 'node:crypto'
|
||||
import path from 'node:path'
|
||||
|
||||
const log = debug('tus-node-server:stores:s3store')
|
||||
|
||||
type Options = {
|
||||
// The preferred part size for parts send to S3. Can not be lower than 5MiB or more than 5GiB.
|
||||
// The server calculates the optimal part size, which takes this size into account,
|
||||
// but may increase it to not exceed the S3 10K parts limit.
|
||||
partSize?: number
|
||||
useTags?: boolean
|
||||
maxConcurrentPartUploads?: number
|
||||
cache?: KvStore<MetadataValue>
|
||||
expirationPeriodInMilliseconds?: number
|
||||
// Options to pass to the AWS S3 SDK.
|
||||
s3ClientConfig: S3ClientConfig & { bucket: string }
|
||||
}
|
||||
|
||||
export type MetadataValue = {
|
||||
file: Upload
|
||||
'upload-id': string
|
||||
'tus-version': string
|
||||
}
|
||||
|
||||
function calcOffsetFromParts(parts?: Array<AWS.Part>) {
|
||||
// @ts-expect-error not undefined
|
||||
return parts && parts.length > 0 ? parts.reduce((a, b) => a + b.Size, 0) : 0
|
||||
}
|
||||
|
||||
// Implementation (based on https://github.com/tus/tusd/blob/master/s3store/s3store.go)
|
||||
//
|
||||
// Once a new tus upload is initiated, multiple objects in S3 are created:
|
||||
//
|
||||
// First of all, a new info object is stored which contains (as Metadata) a JSON-encoded
|
||||
// blob of general information about the upload including its size and meta data.
|
||||
// This kind of objects have the suffix ".info" in their key.
|
||||
//
|
||||
// In addition a new multipart upload
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) is
|
||||
// created. Whenever a new chunk is uploaded to tus-node-server using a PATCH request, a
|
||||
// new part is pushed to the multipart upload on S3.
|
||||
//
|
||||
// If meta data is associated with the upload during creation, it will be added
|
||||
// to the multipart upload and after finishing it, the meta data will be passed
|
||||
// to the final object. However, the metadata which will be attached to the
|
||||
// final object can only contain ASCII characters and every non-ASCII character
|
||||
// will be replaced by a question mark (for example, "Menü" will be "Men?").
|
||||
// However, this does not apply for the metadata returned by the `_getMetadata`
|
||||
// function since it relies on the info object for reading the metadata.
|
||||
// Therefore, HEAD responses will always contain the unchanged metadata, Base64-
|
||||
// encoded, even if it contains non-ASCII characters.
|
||||
//
|
||||
// Once the upload is finished, the multipart upload is completed, resulting in
|
||||
// the entire file being stored in the bucket. The info object, containing
|
||||
// meta data is not deleted.
|
||||
//
|
||||
// Considerations
|
||||
//
|
||||
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
|
||||
// are internally used.
|
||||
// For each incoming PATCH request (a call to `write`), a new part is uploaded
|
||||
// to S3.
|
||||
export class S3Store extends DataStore {
|
||||
private bucket: string
|
||||
private cache: KvStore<MetadataValue>
|
||||
private client: S3
|
||||
private preferredPartSize: number
|
||||
private expirationPeriodInMilliseconds = 0
|
||||
private useTags = true
|
||||
private partUploadSemaphore: Semaphore
|
||||
public maxMultipartParts = 10_000 as const
|
||||
public minPartSize = 5_242_880 as const // 5MiB
|
||||
public maxUploadSize = 5_497_558_138_880 as const // 5TiB
|
||||
|
||||
constructor(options: Options) {
|
||||
super()
|
||||
const { partSize, s3ClientConfig } = options
|
||||
const { bucket, ...restS3ClientConfig } = s3ClientConfig
|
||||
this.extensions = [
|
||||
'creation',
|
||||
'creation-with-upload',
|
||||
'creation-defer-length',
|
||||
'termination',
|
||||
'expiration',
|
||||
]
|
||||
this.bucket = bucket
|
||||
this.preferredPartSize = partSize || 8 * 1024 * 1024
|
||||
this.expirationPeriodInMilliseconds = options.expirationPeriodInMilliseconds ?? 0
|
||||
this.useTags = options.useTags ?? true
|
||||
this.cache = options.cache ?? new MemoryKvStore<MetadataValue>()
|
||||
this.client = new S3(restS3ClientConfig)
|
||||
this.partUploadSemaphore = new Semaphore(options.maxConcurrentPartUploads ?? 60)
|
||||
}
|
||||
|
||||
protected shouldUseExpirationTags() {
|
||||
return this.expirationPeriodInMilliseconds !== 0 && this.useTags
|
||||
}
|
||||
|
||||
protected useCompleteTag(value: 'true' | 'false') {
|
||||
if (!this.shouldUseExpirationTags()) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return `Tus-Completed=${value}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves upload metadata to a `${file_id}.info` file on S3.
|
||||
* Please note that the file is empty and the metadata is saved
|
||||
* on the S3 object's `Metadata` field, so that only a `headObject`
|
||||
* is necessary to retrieve the data.
|
||||
*/
|
||||
private async saveMetadata(upload: Upload, uploadId: string) {
|
||||
log(`[${upload.id}] saving metadata`)
|
||||
await this.client.putObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.infoKey(upload.id),
|
||||
Body: JSON.stringify(upload),
|
||||
Tagging: this.useCompleteTag('false'),
|
||||
Metadata: {
|
||||
'upload-id': uploadId,
|
||||
'tus-version': TUS_RESUMABLE,
|
||||
},
|
||||
})
|
||||
log(`[${upload.id}] metadata file saved`)
|
||||
}
|
||||
|
||||
private async completeMetadata(upload: Upload) {
|
||||
if (!this.shouldUseExpirationTags()) {
|
||||
return
|
||||
}
|
||||
|
||||
const { 'upload-id': uploadId } = await this.getMetadata(upload.id)
|
||||
await this.client.putObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.infoKey(upload.id),
|
||||
Body: JSON.stringify(upload),
|
||||
Tagging: this.useCompleteTag('true'),
|
||||
Metadata: {
|
||||
'upload-id': uploadId,
|
||||
'tus-version': TUS_RESUMABLE,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves upload metadata previously saved in `${file_id}.info`.
|
||||
* There's a small and simple caching mechanism to avoid multiple
|
||||
* HTTP calls to S3.
|
||||
*/
|
||||
private async getMetadata(id: string): Promise<MetadataValue> {
|
||||
const cached = await this.cache.get(id)
|
||||
if (cached) {
|
||||
return cached
|
||||
}
|
||||
|
||||
const { Metadata, Body } = await this.client.getObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.infoKey(id),
|
||||
})
|
||||
const file = JSON.parse((await Body?.transformToString()) as string)
|
||||
const metadata: MetadataValue = {
|
||||
'tus-version': Metadata?.['tus-version'] as string,
|
||||
'upload-id': Metadata?.['upload-id'] as string,
|
||||
file: new Upload({
|
||||
id,
|
||||
size: file.size ? Number.parseInt(file.size, 10) : undefined,
|
||||
offset: Number.parseInt(file.offset, 10),
|
||||
metadata: file.metadata,
|
||||
creation_date: file.creation_date,
|
||||
storage: file.storage,
|
||||
}),
|
||||
}
|
||||
await this.cache.set(id, metadata)
|
||||
return metadata
|
||||
}
|
||||
|
||||
private infoKey(id: string) {
|
||||
return `${id}.info`
|
||||
}
|
||||
|
||||
private partKey(id: string, isIncomplete = false) {
|
||||
if (isIncomplete) {
|
||||
id += '.part'
|
||||
}
|
||||
|
||||
// TODO: introduce ObjectPrefixing for parts and incomplete parts.
|
||||
// ObjectPrefix is prepended to the name of each S3 object that is created
|
||||
// to store uploaded files. It can be used to create a pseudo-directory
|
||||
// structure in the bucket, e.g. "path/to/my/uploads".
|
||||
return id
|
||||
}
|
||||
|
||||
private async uploadPart(
|
||||
metadata: MetadataValue,
|
||||
readStream: fs.ReadStream | Readable,
|
||||
partNumber: number
|
||||
): Promise<string> {
|
||||
const data = await this.client.uploadPart({
|
||||
Bucket: this.bucket,
|
||||
Key: metadata.file.id,
|
||||
UploadId: metadata['upload-id'],
|
||||
PartNumber: partNumber,
|
||||
Body: readStream,
|
||||
})
|
||||
log(`[${metadata.file.id}] finished uploading part #${partNumber}`)
|
||||
return data.ETag as string
|
||||
}
|
||||
|
||||
private async uploadIncompletePart(
|
||||
id: string,
|
||||
readStream: fs.ReadStream | Readable
|
||||
): Promise<string> {
|
||||
const data = await this.client.putObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.partKey(id, true),
|
||||
Body: readStream,
|
||||
Tagging: this.useCompleteTag('false'),
|
||||
})
|
||||
log(`[${id}] finished uploading incomplete part`)
|
||||
return data.ETag as string
|
||||
}
|
||||
|
||||
private async downloadIncompletePart(id: string) {
|
||||
const incompletePart = await this.getIncompletePart(id)
|
||||
|
||||
if (!incompletePart) {
|
||||
return
|
||||
}
|
||||
const filePath = await this.uniqueTmpFileName('tus-s3-incomplete-part-')
|
||||
|
||||
try {
|
||||
let incompletePartSize = 0
|
||||
|
||||
const byteCounterTransform = new stream.Transform({
|
||||
transform(chunk, _, callback) {
|
||||
incompletePartSize += chunk.length
|
||||
callback(null, chunk)
|
||||
},
|
||||
})
|
||||
|
||||
// write to temporary file
|
||||
await streamProm.pipeline(
|
||||
incompletePart,
|
||||
byteCounterTransform,
|
||||
fs.createWriteStream(filePath)
|
||||
)
|
||||
|
||||
const createReadStream = (options: { cleanUpOnEnd: boolean }) => {
|
||||
const fileReader = fs.createReadStream(filePath)
|
||||
|
||||
if (options.cleanUpOnEnd) {
|
||||
fileReader.on('end', () => {
|
||||
fs.unlink(filePath, () => {
|
||||
// ignore
|
||||
})
|
||||
})
|
||||
|
||||
fileReader.on('error', (err) => {
|
||||
fileReader.destroy(err)
|
||||
fs.unlink(filePath, () => {
|
||||
// ignore
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
return fileReader
|
||||
}
|
||||
|
||||
return {
|
||||
size: incompletePartSize,
|
||||
path: filePath,
|
||||
createReader: createReadStream,
|
||||
}
|
||||
} catch (err) {
|
||||
fsProm.rm(filePath).catch(() => {
|
||||
/* ignore */
|
||||
})
|
||||
throw err
|
||||
}
|
||||
}
|
||||
|
||||
private async getIncompletePart(id: string): Promise<Readable | undefined> {
|
||||
try {
|
||||
const data = await this.client.getObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.partKey(id, true),
|
||||
})
|
||||
return data.Body as Readable
|
||||
} catch (error) {
|
||||
if (error instanceof NoSuchKey) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async getIncompletePartSize(id: string): Promise<number | undefined> {
|
||||
try {
|
||||
const data = await this.client.headObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.partKey(id, true),
|
||||
})
|
||||
return data.ContentLength
|
||||
} catch (error) {
|
||||
if (error instanceof NotFound) {
|
||||
return undefined
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async deleteIncompletePart(id: string): Promise<void> {
|
||||
await this.client.deleteObject({
|
||||
Bucket: this.bucket,
|
||||
Key: this.partKey(id, true),
|
||||
})
|
||||
}

  /**
   * Uploads a stream to s3 using multiple parts
   */
  private async uploadParts(
    metadata: MetadataValue,
    readStream: stream.Readable,
    currentPartNumber: number,
    offset: number
  ): Promise<number> {
    const size = metadata.file.size
    const promises: Promise<void>[] = []
    let pendingChunkFilepath: string | null = null
    let bytesUploaded = 0
    let permit: Permit | undefined = undefined

    const splitterStream = new StreamSplitter({
      chunkSize: this.calcOptimalPartSize(size),
      directory: os.tmpdir(),
    })
      .on('beforeChunkStarted', async () => {
        permit = await this.partUploadSemaphore.acquire()
      })
      .on('chunkStarted', (filepath) => {
        pendingChunkFilepath = filepath
      })
      .on('chunkFinished', ({ path, size: partSize }) => {
        pendingChunkFilepath = null

        const acquiredPermit = permit
        const partNumber = currentPartNumber++

        offset += partSize

        const isFinalPart = size === offset

        // biome-ignore lint/suspicious/noAsyncPromiseExecutor: it's fine
        const deferred = new Promise<void>(async (resolve, reject) => {
          try {
            // Only the first chunk of each PATCH request can prepend
            // an incomplete part (last chunk) from the previous request.
            const readable = fs.createReadStream(path)
            readable.on('error', reject)

            if (partSize >= this.minPartSize || isFinalPart) {
              await this.uploadPart(metadata, readable, partNumber)
            } else {
              await this.uploadIncompletePart(metadata.file.id, readable)
            }

            bytesUploaded += partSize
            resolve()
          } catch (error) {
            reject(error)
          } finally {
            fsProm.rm(path).catch(() => {
              /* ignore */
            })
            acquiredPermit?.release()
          }
        })

        promises.push(deferred)
      })
      .on('chunkError', () => {
        permit?.release()
      })

    try {
      await streamProm.pipeline(readStream, splitterStream)
    } catch (error) {
      if (pendingChunkFilepath !== null) {
        try {
          await fsProm.rm(pendingChunkFilepath)
        } catch {
          log(`[${metadata.file.id}] failed to remove chunk ${pendingChunkFilepath}`)
        }
      }

      promises.push(Promise.reject(error))
    } finally {
      await Promise.all(promises)
    }

    return bytesUploaded
  }
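
  // Worked example (illustrative numbers, assuming preferredPartSize = 8 MiB and
  // minPartSize = 5 MiB): for a declared size of 23 MiB, the chunk size is 8 MiB.
  // A first PATCH carrying 19 MiB splits into 8 + 8 + 3 MiB; the 3 MiB chunk is
  // below minPartSize and not final, so it goes through uploadIncompletePart.
  // The next PATCH carries the remaining 4 MiB, the 3 MiB incomplete part is
  // prepended, and the resulting 7 MiB chunk reaches the declared size, so
  // `isFinalPart` is true and it is uploaded as a regular (final) part.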

  /**
   * Completes a multipart upload on S3.
   * This is where S3 concatenates all the uploaded parts.
   */
  private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
    const response = await this.client.completeMultipartUpload({
      Bucket: this.bucket,
      Key: metadata.file.id,
      UploadId: metadata['upload-id'],
      MultipartUpload: {
        Parts: parts.map((part) => {
          return {
            ETag: part.ETag,
            PartNumber: part.PartNumber,
          }
        }),
      },
    })
    return response.Location
  }

  /**
   * Gets the complete parts/chunks already uploaded to S3.
   * Retrieves only consecutive parts.
   */
  private async retrieveParts(
    id: string,
    partNumberMarker?: string
  ): Promise<Array<AWS.Part>> {
    const metadata = await this.getMetadata(id)

    const params: AWS.ListPartsCommandInput = {
      Bucket: this.bucket,
      Key: id,
      UploadId: metadata['upload-id'],
      PartNumberMarker: partNumberMarker,
    }

    const data = await this.client.listParts(params)

    let parts = data.Parts ?? []

    if (data.IsTruncated) {
      const rest = await this.retrieveParts(id, data.NextPartNumberMarker)
      parts = [...parts, ...rest]
    }

    if (!partNumberMarker) {
      // biome-ignore lint/style/noNonNullAssertion: it's fine
      parts.sort((a, b) => a.PartNumber! - b.PartNumber!)
    }

    return parts
  }
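
  // CompleteMultipartUpload requires the parts list in ascending PartNumber order,
  // which is why the full (unpaginated) result is sorted before being returned.
  // Pagination is handled by recursing with NextPartNumberMarker until S3 reports
  // IsTruncated = false.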

  /**
   * Removes cached data for a given file.
   */
  private async clearCache(id: string) {
    log(`[${id}] removing cached data`)
    await this.cache.delete(id)
  }

  private calcOptimalPartSize(size?: number): number {
    // When the upload size is not known we assume the largest possible value (`maxUploadSize`)
    if (size === undefined) {
      size = this.maxUploadSize
    }

    let optimalPartSize: number

    // When the upload is smaller than or equal to the preferred part size, we upload in just one part.
    if (size <= this.preferredPartSize) {
      optimalPartSize = size
    }
    // Does the upload fit in maxMultipartParts parts or fewer with the preferred part size?
    else if (size <= this.preferredPartSize * this.maxMultipartParts) {
      optimalPartSize = this.preferredPartSize
      // The upload is too big for the preferred size.
      // We divide the size by the maximum number of parts and round up.
    } else {
      optimalPartSize = Math.ceil(size / this.maxMultipartParts)
    }

    return optimalPartSize
  }
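
  // Worked example (assuming, for illustration, preferredPartSize = 8 MiB and
  // maxMultipartParts = 10,000, the S3 limit):
  //   size = 5 MiB   -> one 5 MiB part (size <= preferredPartSize)
  //   size = 50 GiB  -> 8 MiB parts (6,400 parts, well under the limit)
  //   size = 100 GiB -> exceeds 8 MiB * 10,000 (~78 GiB), so parts become
  //                     ceil(size / 10,000) ≈ 10.24 MiB each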

  /**
   * Creates a multipart upload on S3 attaching any metadata to it.
   * Also, a `${file_id}.info` file is created which holds some information
   * about the upload itself like: `upload-id`, `upload-length`, etc.
   */
  public async create(upload: Upload) {
    log(`[${upload.id}] initializing multipart upload`)
    const request: AWS.CreateMultipartUploadCommandInput = {
      Bucket: this.bucket,
      Key: upload.id,
      Metadata: { 'tus-version': TUS_RESUMABLE },
    }

    if (upload.metadata?.contentType) {
      request.ContentType = upload.metadata.contentType
    }

    if (upload.metadata?.cacheControl) {
      request.CacheControl = upload.metadata.cacheControl
    }

    upload.creation_date = new Date().toISOString()

    const res = await this.client.createMultipartUpload(request)
    upload.storage = {
      type: 's3',
      path: res.Key as string,
      bucket: this.bucket,
    }
    await this.saveMetadata(upload, res.UploadId as string)
    log(`[${upload.id}] multipart upload created (${res.UploadId})`)

    return upload
  }
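
  // The `.info` sidecar written by saveMetadata is what later requests rely on:
  // getUpload and write read the `upload-id` and declared length from it via
  // getMetadata, rather than having to list or inspect the S3 object itself.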

  async read(id: string) {
    const data = await this.client.getObject({
      Bucket: this.bucket,
      Key: id,
    })
    return data.Body as Readable
  }

  /**
   * Write to the file, starting at the provided offset
   */
  public async write(src: stream.Readable, id: string, offset: number): Promise<number> {
    // Metadata request needs to happen first
    const metadata = await this.getMetadata(id)
    const parts = await this.retrieveParts(id)
    // biome-ignore lint/style/noNonNullAssertion: it's fine
    const partNumber: number = parts.length > 0 ? parts[parts.length - 1].PartNumber! : 0
    const nextPartNumber = partNumber + 1

    const incompletePart = await this.downloadIncompletePart(id)
    const requestedOffset = offset

    if (incompletePart) {
      // once the file is on disk, we delete the incomplete part
      await this.deleteIncompletePart(id)

      offset = requestedOffset - incompletePart.size
      src = new MultiStream([incompletePart.createReader({ cleanUpOnEnd: true }), src])
    }

    const bytesUploaded = await this.uploadParts(metadata, src, nextPartNumber, offset)

    // The size of the incomplete part should not be counted, because handling
    // of the incomplete part should be fully transparent to the client.
    const newOffset = requestedOffset + bytesUploaded - (incompletePart?.size ?? 0)

    if (metadata.file.size === newOffset) {
      try {
        const parts = await this.retrieveParts(id)
        await this.finishMultipartUpload(metadata, parts)
        await this.completeMetadata(metadata.file)
        await this.clearCache(id)
      } catch (error) {
        log(`[${id}] failed to finish upload`, error)
        throw error
      }
    }

    return newOffset
  }
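
  // Offset bookkeeping example (illustrative numbers): suppose 19 MiB have been
  // acknowledged, of which the last 3 MiB live in the incomplete part. For the
  // next PATCH, requestedOffset = 19 MiB; the incomplete part is deleted, `offset`
  // is rewound to 16 MiB (where that data starts), and the 3 MiB are prepended to
  // the new stream. If the request then uploads 7 MiB of parts in total, the
  // reported offset becomes 19 + 7 - 3 = 23 MiB, so the prepended bytes are not
  // counted twice.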

  public async getUpload(id: string): Promise<Upload> {
    let metadata: MetadataValue
    try {
      metadata = await this.getMetadata(id)
    } catch (error) {
      log('getUpload: No file found.', error)
      throw ERRORS.FILE_NOT_FOUND
    }

    let offset = 0

    try {
      const parts = await this.retrieveParts(id)
      offset = calcOffsetFromParts(parts)
    } catch (error: any) {
      // Check if the error is caused by the upload not being found. This happens
      // when the multipart upload has already been completed or aborted. Since
      // we already found the info object, we know that the upload has been
      // completed and can therefore ensure that the offset equals the size.
      // AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
      // Spaces, can also return NoSuchKey.
      if (error.Code === 'NoSuchUpload' || error.Code === 'NoSuchKey') {
        return new Upload({
          ...metadata.file,
          offset: metadata.file.size as number,
          size: metadata.file.size,
          metadata: metadata.file.metadata,
          storage: metadata.file.storage,
        })
      }

      log(error)
      throw error
    }

    const incompletePartSize = await this.getIncompletePartSize(id)

    return new Upload({
      ...metadata.file,
      offset: offset + (incompletePartSize ?? 0),
      size: metadata.file.size,
      storage: metadata.file.storage,
    })
  }
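
  // The reported offset is the byte total of the consecutive parts already in S3
  // plus any incomplete part, so clients resume exactly where the stored data ends;
  // a NoSuchUpload/NoSuchKey from listParts means the upload was already completed,
  // in which case the offset is simply the declared size.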

  public async declareUploadLength(file_id: string, upload_length: number) {
    const { file, 'upload-id': uploadId } = await this.getMetadata(file_id)
    if (!file) {
      throw ERRORS.FILE_NOT_FOUND
    }

    file.size = upload_length

    await this.saveMetadata(file, uploadId)
  }

  public async remove(id: string): Promise<void> {
    try {
      const { 'upload-id': uploadId } = await this.getMetadata(id)
      if (uploadId) {
        await this.client.abortMultipartUpload({
          Bucket: this.bucket,
          Key: id,
          UploadId: uploadId,
        })
      }
    } catch (error: any) {
      if (error?.Code && ['NotFound', 'NoSuchKey', 'NoSuchUpload'].includes(error.Code)) {
        log('remove: No file found.', error)
        throw ERRORS.FILE_NOT_FOUND
      }
      throw error
    }

    await this.client.deleteObjects({
      Bucket: this.bucket,
      Delete: {
        Objects: [{ Key: id }, { Key: this.infoKey(id) }],
      },
    })

    await this.clearCache(id)
  }

  protected getExpirationDate(created_at: string) {
    const date = new Date(created_at)

    return new Date(date.getTime() + this.getExpiration())
  }

  getExpiration(): number {
    return this.expirationPeriodInMilliseconds
  }

  async deleteExpired(): Promise<number> {
    if (this.getExpiration() === 0) {
      return 0
    }

    let keyMarker: string | undefined = undefined
    let uploadIdMarker: string | undefined = undefined
    let isTruncated = true
    let deleted = 0

    while (isTruncated) {
      const listResponse: AWS.ListMultipartUploadsCommandOutput =
        await this.client.listMultipartUploads({
          Bucket: this.bucket,
          KeyMarker: keyMarker,
          UploadIdMarker: uploadIdMarker,
        })

      const expiredUploads =
        listResponse.Uploads?.filter((multiPartUpload) => {
          const initiatedDate = multiPartUpload.Initiated
          return (
            initiatedDate &&
            new Date().getTime() >
              this.getExpirationDate(initiatedDate.toISOString()).getTime()
          )
        }) || []

      const objectsToDelete = expiredUploads.reduce(
        (all, expiredUpload) => {
          all.push(
            {
              key: this.infoKey(expiredUpload.Key as string),
            },
            {
              key: this.partKey(expiredUpload.Key as string, true),
            }
          )
          return all
        },
        [] as { key: string }[]
      )

      const deletions: Promise<AWS.DeleteObjectsCommandOutput>[] = []

      // Batch delete 1000 items at a time
      while (objectsToDelete.length > 0) {
        const objects = objectsToDelete.splice(0, 1000)
        deletions.push(
          this.client.deleteObjects({
            Bucket: this.bucket,
            Delete: {
              Objects: objects.map((object) => ({
                Key: object.key,
              })),
            },
          })
        )
      }

      const [objectsDeleted] = await Promise.all([
        Promise.all(deletions),
        ...expiredUploads.map((expiredUpload) => {
          return this.client.abortMultipartUpload({
            Bucket: this.bucket,
            Key: expiredUpload.Key,
            UploadId: expiredUpload.UploadId,
          })
        }),
      ])

      deleted += objectsDeleted.reduce((all, acc) => all + (acc.Deleted?.length ?? 0), 0)

      isTruncated = Boolean(listResponse.IsTruncated)

      if (isTruncated) {
        keyMarker = listResponse.NextKeyMarker
        uploadIdMarker = listResponse.NextUploadIdMarker
      }
    }

    return deleted
  }
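
  // Note: DeleteObjects accepts at most 1,000 keys per request, hence the
  // 1,000-item batches above. Expiry is judged from each multipart upload's
  // Initiated timestamp plus `expirationPeriodInMilliseconds`; matching uploads
  // are aborted and their `.info` and `.part` sidecar objects deleted in the
  // same pass.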

  private async uniqueTmpFileName(template: string): Promise<string> {
    let tries = 0
    const maxTries = 10

    while (tries < maxTries) {
      const fileName =
        template + crypto.randomBytes(10).toString('base64url').slice(0, 10)
      const filePath = path.join(os.tmpdir(), fileName)

      try {
        await fsProm.lstat(filePath)
        // If no error, file exists, so try again
        tries++
      } catch (e: any) {
        if (e.code === 'ENOENT') {
          // File does not exist, return the path
          return filePath
        }
        throw e // For other errors, rethrow
      }
    }

    throw new Error(`Could not find a unique file name after ${maxTries} tries`)
  }
}
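
// Minimal usage sketch. The class name (S3Store), constructor options (partSize,
// s3ClientConfig) and the @tus/server Server API below follow the upstream tus
// packages and are assumptions here, since the constructor and exports of this
// vendored copy sit outside this excerpt.
//
// import { Server } from '@tus/server'
//
// const store = new S3Store({
//   partSize: 8 * 1024 * 1024, // preferred part size used by calcOptimalPartSize
//   s3ClientConfig: {
//     bucket: 'my-upload-bucket',
//     region: 'us-east-1',
//     credentials: { accessKeyId: '...', secretAccessKey: '...' },
//   },
// })
//
// new Server({ path: '/files', datastore: store }).listen(1080)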

pnpm-lock.yaml (1655 changed lines): file diff suppressed because it is too large