longdayi 2025-05-28 08:25:43 +08:00
commit dc85586930
75 changed files with 8249 additions and 292 deletions

View File

@ -4,9 +4,9 @@ TIMEZONE=Asia/Singapore
POSTGRES_VERSION=latest
# PostgreSQL Env
POSTGRES_DB=nice
POSTGRES_DB=db
POSTGRES_USER=nice
POSTGRES_PASSWORD=nice
POSTGRES_PASSWORD=Letusdoit123
# Redis config
REDIS_VERSION=7.2.4
@ -14,8 +14,8 @@ REDIS_PASSWORD=nice
# MinIO config
MINIO_VERSION=latest
MINIO_ACCESS_KEY=nice
MINIO_SECRET_KEY=nice123
MINIO_ACCESS_KEY=nice1234
MINIO_SECRET_KEY=nice1234
# Elasticsearch config
ELASTIC_VERSION=9.0.1
ELASTIC_PASSWORD=nice_elastic_password

View File

@ -1,6 +1,6 @@
ELASTICSEARCH_NODE=http://localhost:9200
ELASTICSEARCH_USER=elastic
ELASTICSEARCH_PASSWORD=changeme
ELASTICSEARCH_PASSWORD=nice_elastic_password
MINIO_ENDPOINT=localhost
MINIO_PORT=9000
MINIO_USE_SSL=false
@ -12,4 +12,6 @@ REDIS_PASSWORD=nice
# OIDC_COOKIE_KEY=
OIDC_CLIENT_ID=your-client-id
OIDC_CLIENT_SECRET=your-client-secret
OIDC_REDIRECT_URI=https://your-frontend.com/callback
OIDC_REDIRECT_URI=https://your-frontend.com/callback
UPLOAD_DIR=/opt/projects/nice/uploads

View File

@ -29,4 +29,4 @@
"supertest": "^7.1.1",
"vitest": "^3.1.4"
}
}
}

View File

@ -1,56 +1,78 @@
import { Hono } from 'hono'
import { contextStorage, getContext } from 'hono/context-storage'
import { prettyJSON } from 'hono/pretty-json'
import { cors } from 'hono/cors'
import { trpcServer } from '@hono/trpc-server'
import { Hono } from 'hono';
import { logger } from 'hono/logger';
import { contextStorage, getContext } from 'hono/context-storage';
import { prettyJSON } from 'hono/pretty-json';
import { cors } from 'hono/cors';
import Redis from 'ioredis'
import redis from './redis'
import minioClient from './minio'
import { Client } from 'minio'
import { appRouter } from './trpc'
import { oidcApp } from './oidc-demo'
import { trpcServer } from '@hono/trpc-server';
import Redis from 'ioredis';
import redis from './redis';
import minioClient from './minio';
import { Client } from 'minio';
import { appRouter } from './trpc';
import { createBunWebSocket } from 'hono/bun';
import { wsHandler, wsConfig } from './socket';
// Import the new routes
import userRest from './user/user.rest';
import uploadRest from './upload/upload.rest';
import { startCleanupScheduler } from './upload/scheduler';
type Env = {
Variables: {
redis: Redis
minio: Client
}
}
Variables: {
redis: Redis;
minio: Client;
};
};
const app = new Hono<Env>()
const app = new Hono<Env>();
// Global CORS configuration
app.use('*', cors({
origin: '*',
}))
// Inject dependencies
app.use('*', async (c, next) => {
c.set('redis', redis)
c.set('minio', minioClient)
await next()
})
// Middleware
app.use(contextStorage())
app.use(prettyJSON())
app.route('/oidc', oidcApp)
// Mount tRPC
app.use(
'/trpc/*',
trpcServer({
router: appRouter,
})
)
'*',
cors({
origin: 'http://localhost:3001',
credentials: true,
}),
);
// Start the server
const port = parseInt(process.env.PORT || '3000');
export default {
port,
fetch: app.fetch,
}
app.use('*', async (c, next) => {
c.set('redis', redis);
c.set('minio', minioClient);
await next();
});
app.use('*', async (c, next) => {
c.set('redis', redis);
await next();
});
app.use(contextStorage());
app.use(prettyJSON()); // With options: prettyJSON({ space: 4 })
app.use(logger());
app.use(
'/trpc/*',
trpcServer({
router: appRouter,
}),
);
console.log(`🚀 Server running at http://localhost:${port}`)
// Register REST API routes
app.route('/api/users', userRest);
app.route('/api/upload', uploadRest);
// Register the WebSocket route
app.get('/ws', wsHandler);
// Start the upload cleanup scheduler
startCleanupScheduler();
const bunServerConfig = {
port: 3000,
fetch: app.fetch,
...wsConfig,
};
// Start the Bun server
Bun.serve(bunServerConfig);
export default app;

View File

@ -0,0 +1,8 @@
import { router } from './trpc';
import { userRouter } from './user/user.trpc';
export const appRouter = router({
user: userRouter,
});
export type AppRouter = typeof appRouter;

apps/backend/src/socket.ts (new file, +131 lines)
View File

@ -0,0 +1,131 @@
import { createBunWebSocket } from 'hono/bun';
import type { ServerWebSocket } from 'bun';
import { WSContext } from 'hono/ws';
enum MessageType {
TEXT = 'text',
IMAGE = 'image',
FILE = 'file',
RETRACT = 'retract', // used for message recall
}
interface WSMessageParams {
text?: string;
type?: MessageType;
fileUri?: string;
}
// Message payload interface
interface WSMessage {
action: 'join' | 'leave' | 'message';
roomId: string;
data: WSMessageParams;
}
// Create the WebSocket helpers with the Bun-specific socket type
const { upgradeWebSocket, websocket } = createBunWebSocket<ServerWebSocket>();
// Map of room ID to connected sockets
const rooms = new Map<string, Set<WSContext<ServerWebSocket>>>();
// WebSocket handler
const wsHandler = upgradeWebSocket((c) => {
// Rooms this connection has joined
const joinedRooms = new Set<string>();
return {
onOpen(_, ws) {
console.log('WebSocket connection established');
},
onMessage(message, ws) {
try {
const parsedMessage: WSMessage = JSON.parse(message.data as any);
console.log('Message received:', parsedMessage);
switch (parsedMessage.action) {
case 'join':
// Join a room
if (!rooms.has(parsedMessage.roomId)) {
rooms.set(parsedMessage.roomId, new Set());
}
rooms.get(parsedMessage.roomId)?.add(ws);
joinedRooms.add(parsedMessage.roomId);
// Acknowledge the join
ws.send(
JSON.stringify({
action: 'system',
data: {
text: `Joined room ${parsedMessage.roomId}`,
type: MessageType.TEXT,
},
roomId: parsedMessage.roomId,
}),
);
break;
case 'leave':
// Leave a room
rooms.get(parsedMessage.roomId)?.delete(ws);
joinedRooms.delete(parsedMessage.roomId);
if (rooms.get(parsedMessage.roomId)?.size === 0) {
rooms.delete(parsedMessage.roomId);
}
break;
case 'message':
// Broadcast the message to everyone in the room
const room = rooms.get(parsedMessage.roomId);
if (room) {
const messageToSend = {
action: 'message',
data: parsedMessage.data,
roomId: parsedMessage.roomId,
};
for (const conn of room) {
if (conn.readyState === WebSocket.OPEN) {
conn.send(JSON.stringify(messageToSend));
}
}
}
break;
}
} catch (error) {
console.error('Error while handling message:', error);
ws.send(
JSON.stringify({
action: 'error',
data: {
text: 'Failed to process message',
type: MessageType.TEXT,
},
}),
);
}
},
onClose(_, ws) {
console.log('WebSocket connection closed');
// Remove this connection from all joined rooms
for (const roomId of joinedRooms) {
rooms.get(roomId)?.delete(ws);
if (rooms.get(roomId)?.size === 0) {
rooms.delete(roomId);
}
}
},
onError(error, ws) {
console.error('WebSocket error:', error);
// Remove this connection from all joined rooms
for (const roomId of joinedRooms) {
rooms.get(roomId)?.delete(ws);
if (rooms.get(roomId)?.size === 0) {
rooms.delete(roomId);
}
}
},
};
});
export const wsConfig = {
websocket,
};
export { wsHandler, rooms };
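
For reference, the room protocol above can be exercised with a plain WebSocket client. A minimal sketch, assuming the server from index.ts is listening on port 3000 (the room ID and message text are arbitrary examples):

```typescript
// Minimal client sketch for the room protocol above (assumed server on localhost:3000).
const ws = new WebSocket('ws://localhost:3000/ws');

ws.onopen = () => {
  // Join a room, then broadcast a text message into it.
  ws.send(JSON.stringify({ action: 'join', roomId: 'demo', data: {} }));
  ws.send(JSON.stringify({ action: 'message', roomId: 'demo', data: { text: 'hello', type: 'text' } }));
};

ws.onmessage = (event) => console.log('received:', JSON.parse(event.data));
```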

View File

@ -0,0 +1,232 @@
# Upload Module Architecture Migration
This module has been migrated from the NestJS architecture to Hono + Bun, and supports transparent switching between multiple storage backends.
## File Structure
```
src/upload/
├── tus.ts               # Core TUS protocol service
├── upload.index.ts      # Resource management functions
├── upload.rest.ts       # Hono REST API routes
├── storage.adapter.ts   # Storage adapter system 🆕
├── storage.utils.ts     # Storage utilities 🆕
├── scheduler.ts         # Scheduled cleanup task
├── utils.ts             # Helper functions
├── types.ts             # Type definitions
└── README.md            # This document
```
## Storage Adapter System
### Supported Storage Types
1. **Local** - files are stored on the local filesystem
2. **S3** - files are stored in AWS S3 or a compatible object storage service
### Environment Variables
#### Local storage
```bash
STORAGE_TYPE=local
UPLOAD_DIR=./uploads
UPLOAD_EXPIRATION_MS=0 # 0 disables automatic expiration (recommended)
```
#### S3 storage
```bash
STORAGE_TYPE=s3
S3_BUCKET=your-bucket-name
S3_REGION=us-east-1
S3_ACCESS_KEY_ID=your-access-key
S3_SECRET_ACCESS_KEY=your-secret-key
S3_ENDPOINT=https://s3.amazonaws.com # Optional; supports other S3-compatible services
S3_FORCE_PATH_STYLE=false # Optional; path-style addressing
S3_PART_SIZE=8388608 # Optional; part size (8 MB)
S3_MAX_CONCURRENT_UPLOADS=60 # Optional; maximum concurrent part uploads
UPLOAD_EXPIRATION_MS=0 # 0 disables automatic expiration (recommended)
```
### Storage Type Tracking
- **Database support**: every resource record includes a `storageType` field identifying the backend the file was stored with
- **Automatic recording**: the current storage type is recorded at upload time
- **Migration support**: the storage-type markers of existing resources can be updated in bulk
### No-Expiration Default
- **Default behavior**: the expiration period defaults to 0, meaning files never expire automatically
- **Manual cleanup**: several manual cleanup options are provided
- **Flexible control**: set an expiration period as needed, or disable automatic cleanup entirely
### Transparent Switching
1. **Singleton management**: `StorageManager` uses the singleton pattern to guarantee global consistency (see the sketch below)
2. **Automatic configuration detection**: the storage type is selected from environment variables at startup
3. **Unified interface**: every storage type implements the same TUS `DataStore` interface
4. **Runtime switching**: the storage configuration can be switched at runtime (a restart is required to take effect)
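In practice the adapter is consumed through the singleton. A minimal usage sketch, mirroring the `storage.adapter.ts` implementation included in this commit:

```typescript
import { StorageManager } from './storage.adapter';

// STORAGE_TYPE=local|s3 decides which DataStore the singleton builds at startup.
const manager = StorageManager.getInstance();
const dataStore = manager.getDataStore(); // same TUS DataStore interface for either backend
console.log(manager.getStorageInfo()); // e.g. { type: 'local', config: { directory: './uploads' } }
```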
## API Endpoints
### Resource Management
- `GET /api/upload/resource/:fileId` - get a file's resource record
- `GET /api/upload/resources` - list all resources
- `GET /api/upload/resources/storage/:storageType` - 🆕 list resources by storage type
- `GET /api/upload/resources/status/:status` - 🆕 list resources by status
- `GET /api/upload/resources/uploading` - 🆕 list resources that are currently uploading
- `GET /api/upload/stats` - 🆕 get resource statistics
- `DELETE /api/upload/resource/:id` - delete a resource
- `PATCH /api/upload/resource/:id` - update a resource
- `POST /api/upload/cleanup` - trigger cleanup manually
- `POST /api/upload/cleanup/by-status` - 🆕 clean up resources by status
- `POST /api/upload/migrate-storage` - 🆕 migrate resource storage-type markers
### Storage Management
- `GET /api/upload/storage/info` - get the current storage configuration
- `POST /api/upload/storage/switch` - switch the storage type
- `POST /api/upload/storage/validate` - validate a storage configuration
### TUS Protocol
- `OPTIONS /api/upload/*` - TUS options request
- `HEAD /api/upload/*` - TUS head request
- `POST /api/upload/*` - TUS upload creation (client sketch below)
- `PATCH /api/upload/*` - TUS data upload
- `GET /api/upload/*` - TUS status query
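For reference, a minimal browser-side resumable upload against these endpoints with `tus-js-client` could look like the sketch below (the base URL is an assumption; adjust it to your deployment):

```typescript
import * as tus from 'tus-js-client';

// Minimal sketch: resumable upload against the TUS endpoints above (assumed local deployment).
function uploadFile(file: File) {
  const upload = new tus.Upload(file, {
    endpoint: 'http://localhost:3000/api/upload/',
    retryDelays: [0, 1000, 3000],
    metadata: { filename: file.name, filetype: file.type },
    onProgress: (sent, total) => console.log(`${((sent / total) * 100).toFixed(1)}%`),
    onSuccess: () => console.log('Upload finished at', upload.url),
    onError: (err) => console.error('Upload failed:', err),
  });
  upload.start();
}
```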
## New API Usage Examples
### Get resource statistics
```javascript
const response = await fetch('/api/upload/stats');
const stats = await response.json();
// {
// total: 150,
// byStatus: { "UPLOADED": 120, "UPLOADING": 5, "PROCESSED": 25 },
// byStorageType: { "local": 80, "s3": 70 }
// }
```
### Query resources by storage type
```javascript
const response = await fetch('/api/upload/resources/storage/s3');
const s3Resources = await response.json();
```
### Migrate storage-type markers
```javascript
const response = await fetch('/api/upload/migrate-storage', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ from: 'local', to: 's3' }),
});
// { success: true, message: "Migrated 50 resources from local to s3", count: 50 }
```
### Manually clean up resources in a given status
```javascript
const response = await fetch('/api/upload/cleanup/by-status', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
status: 'UPLOADING',
olderThanDays: 7,
}),
});
```
## 🆕 Storage Management Examples
### Get storage info
```javascript
const response = await fetch('/api/upload/storage/info');
const storageInfo = await response.json();
// { type: 'local', config: { directory: './uploads' } }
```
### Switch to S3 storage
```javascript
const newConfig = {
type: 's3',
s3: {
bucket: 'my-bucket',
region: 'us-west-2',
accessKeyId: 'YOUR_ACCESS_KEY',
secretAccessKey: 'YOUR_SECRET_KEY',
},
};
const response = await fetch('/api/upload/storage/switch', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(newConfig),
});
```
### Validate a storage configuration
```javascript
const response = await fetch('/api/upload/storage/validate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(newConfig),
});
const validation = await response.json();
// { valid: true, message: 'Storage configuration is valid' }
```
## Preserved Features
1. **TUS protocol support** - resumable uploads are fully preserved
2. **File naming** - the safe file-naming strategy is preserved
3. **Resource status management** - complete upload status tracking is preserved
4. **Automatic cleanup** - expired-file cleanup is preserved (disabled by default)
5. **Database integration** - Prisma ORM database operations are preserved
## 🆕 New Features
1. **Multiple storage backends** - local and S3 storage are supported
2. **Transparent switching** - the storage type can be switched at runtime
3. **Configuration validation** - storage configurations can be validated
4. **Storage info queries** - the current storage configuration can be inspected
5. **Unified logging** - storage operations are logged consistently
6. **🆕 Storage type tracking** - the database records each resource's storage type
7. **🆕 Flexible cleanup** - clean up by status, age, and other criteria
8. **🆕 Statistics** - detailed resource statistics are available
9. **🆕 No-expiration default** - files never expire automatically, avoiding accidental deletion
## Runtime Behavior
On startup the service automatically:
1. Initializes the storage adapter from environment variables
2. Initializes the TUS server
3. Registers the REST API routes
4. Starts the scheduled cleanup task (if enabled)
Supported storage-switching scenarios:
- Local storage in development
- S3 storage in production
- Flexible switching in hybrid-cloud deployments
- Bulk updates of resource markers during storage migration
## 💡 Best Practices
1. **Expiration**: set `UPLOAD_EXPIRATION_MS=0` to prevent files from expiring unexpectedly
2. **Storage tracking**: use the `storageType` field in the database to track where each file lives
3. **Regular cleanup**: use the manual cleanup APIs to remove unneeded resources periodically
4. **Monitoring**: use the statistics API to monitor storage usage
5. **Migration strategy**: when migrating storage, update the environment variables first, then use the migration API to update the database markers
No code changes are required: the storage backend can be switched transparently via environment variables alone.

View File

@ -0,0 +1,40 @@
import { cleanupExpiredUploads } from './tus';
// Set up the scheduled cleanup task - runs once per day
export function startCleanupScheduler() {
// Run the cleanup task every 24 hours
const CLEANUP_INTERVAL = 24 * 60 * 60 * 1000; // 24 hours in milliseconds
// Run one cleanup shortly after startup
setTimeout(async () => {
console.log('Starting initial cleanup...');
try {
await cleanupExpiredUploads();
} catch (error) {
console.error('Initial cleanup failed:', error);
}
}, 5000); // run 5 seconds after startup
// Schedule the periodic cleanup
setInterval(async () => {
console.log('Starting scheduled cleanup...');
try {
await cleanupExpiredUploads();
} catch (error) {
console.error('Scheduled cleanup failed:', error);
}
}, CLEANUP_INTERVAL);
console.log('Upload cleanup scheduler started - will run every 24 hours');
}
// Trigger cleanup manually (e.g. from an API call)
export async function triggerCleanup() {
console.log('Manual cleanup triggered...');
try {
return await cleanupExpiredUploads();
} catch (error) {
console.error('Manual cleanup failed:', error);
throw error;
}
}

View File

@ -0,0 +1,185 @@
import { FileStore, S3Store } from '@repo/tus';
import type { DataStore } from '@repo/tus';
// Storage type enum
export enum StorageType {
LOCAL = 'local',
S3 = 's3',
}
// Storage configuration interface
export interface StorageConfig {
type: StorageType;
// Local storage settings
local?: {
directory: string;
expirationPeriodInMilliseconds?: number;
};
// S3 storage settings
s3?: {
bucket: string;
region: string;
accessKeyId: string;
secretAccessKey: string;
endpoint?: string; // for other S3-compatible services
forcePathStyle?: boolean;
partSize?: number;
maxConcurrentPartUploads?: number;
expirationPeriodInMilliseconds?: number;
};
}
// Build the storage configuration from environment variables
export function getStorageConfig(): StorageConfig {
const storageType = (process.env.STORAGE_TYPE || 'local') as StorageType;
const config: StorageConfig = {
type: storageType,
};
if (storageType === StorageType.LOCAL) {
config.local = {
directory: process.env.UPLOAD_DIR || './uploads',
expirationPeriodInMilliseconds: parseInt(process.env.UPLOAD_EXPIRATION_MS || '0'), // no expiration by default
};
} else if (storageType === StorageType.S3) {
config.s3 = {
bucket: process.env.S3_BUCKET || '',
region: process.env.S3_REGION || 'us-east-1',
accessKeyId: process.env.S3_ACCESS_KEY_ID || '',
secretAccessKey: process.env.S3_SECRET_ACCESS_KEY || '',
endpoint: process.env.S3_ENDPOINT,
forcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true',
partSize: parseInt(process.env.S3_PART_SIZE || '8388608'), // 8MB
maxConcurrentPartUploads: parseInt(process.env.S3_MAX_CONCURRENT_UPLOADS || '60'),
expirationPeriodInMilliseconds: parseInt(process.env.UPLOAD_EXPIRATION_MS || '0'), // no expiration by default
};
}
return config;
}
// Validate a storage configuration
export function validateStorageConfig(config: StorageConfig): string[] {
const errors: string[] = [];
if (config.type === StorageType.LOCAL) {
if (!config.local?.directory) {
errors.push('Local storage directory is required');
}
} else if (config.type === StorageType.S3) {
const s3Config = config.s3;
if (!s3Config?.bucket) errors.push('S3 bucket is required');
if (!s3Config?.region) errors.push('S3 region is required');
if (!s3Config?.accessKeyId) errors.push('S3 access key ID is required');
if (!s3Config?.secretAccessKey) errors.push('S3 secret access key is required');
} else {
errors.push(`Unsupported storage type: ${config.type}`);
}
return errors;
}
// Create a DataStore instance from the configuration
export function createStorageInstance(config: StorageConfig): DataStore {
// Validate the configuration first
const errors = validateStorageConfig(config);
if (errors.length > 0) {
throw new Error(`Storage configuration errors: ${errors.join(', ')}`);
}
switch (config.type) {
case StorageType.LOCAL:
return new FileStore({
directory: config.local!.directory,
expirationPeriodInMilliseconds: config.local!.expirationPeriodInMilliseconds,
});
case StorageType.S3:
const s3Config = config.s3!;
return new S3Store({
partSize: s3Config.partSize,
maxConcurrentPartUploads: s3Config.maxConcurrentPartUploads,
expirationPeriodInMilliseconds: s3Config.expirationPeriodInMilliseconds,
s3ClientConfig: {
bucket: s3Config.bucket,
region: s3Config.region,
credentials: {
accessKeyId: s3Config.accessKeyId,
secretAccessKey: s3Config.secretAccessKey,
},
endpoint: s3Config.endpoint,
forcePathStyle: s3Config.forcePathStyle,
},
});
default:
throw new Error(`Unsupported storage type: ${config.type}`);
}
}
// Storage manager (singleton)
export class StorageManager {
private static instance: StorageManager;
private storageConfig: StorageConfig;
private dataStore: DataStore;
private constructor() {
this.storageConfig = getStorageConfig();
this.dataStore = createStorageInstance(this.storageConfig);
console.log(`Storage initialized: ${this.storageConfig.type}`);
if (this.storageConfig.type === StorageType.LOCAL) {
console.log(`Local directory: ${this.storageConfig.local?.directory}`);
} else if (this.storageConfig.type === StorageType.S3) {
console.log(`S3 bucket: ${this.storageConfig.s3?.bucket} in region: ${this.storageConfig.s3?.region}`);
}
}
public static getInstance(): StorageManager {
if (!StorageManager.instance) {
StorageManager.instance = new StorageManager();
}
return StorageManager.instance;
}
public getDataStore(): DataStore {
return this.dataStore;
}
public getStorageConfig(): StorageConfig {
return this.storageConfig;
}
public getStorageType(): StorageType {
return this.storageConfig.type;
}
// Switch the storage type (restart required to fully apply)
public async switchStorage(newConfig: StorageConfig): Promise<void> {
const errors = validateStorageConfig(newConfig);
if (errors.length > 0) {
throw new Error(`Invalid storage configuration: ${errors.join(', ')}`);
}
this.storageConfig = newConfig;
this.dataStore = createStorageInstance(newConfig);
console.log(`Storage switched to: ${newConfig.type}`);
}
// Summarize the current storage configuration
public getStorageInfo() {
return {
type: this.storageConfig.type,
config:
this.storageConfig.type === StorageType.LOCAL
? { directory: this.storageConfig.local?.directory }
: {
bucket: this.storageConfig.s3?.bucket,
region: this.storageConfig.s3?.region,
endpoint: this.storageConfig.s3?.endpoint,
},
};
}
}

View File

@ -0,0 +1,202 @@
import { StorageManager, StorageType } from './storage.adapter';
import path from 'path';
/**
 * Storage utilities - helpers for file URLs, access, and statistics on top of the storage adapter.
 */
export class StorageUtils {
private static instance: StorageUtils;
private storageManager: StorageManager;
private constructor() {
this.storageManager = StorageManager.getInstance();
}
public static getInstance(): StorageUtils {
if (!StorageUtils.instance) {
StorageUtils.instance = new StorageUtils();
}
return StorageUtils.instance;
}
/**
 * Generate an access URL for a file.
 * @param fileId file ID
 * @param isPublic whether the file is publicly accessible
 * @returns access URL or path
 */
public generateFileUrl(fileId: string, isPublic: boolean = false): string {
const storageType = this.storageManager.getStorageType();
const config = this.storageManager.getStorageConfig();
switch (storageType) {
case StorageType.LOCAL:
// Local storage returns a relative or server-side path
if (isPublic) {
// Assumes a static file server exposes the uploads directory
return `/uploads/${fileId}`;
}
return path.join(config.local?.directory || './uploads', fileId);
case StorageType.S3:
// S3 storage returns an object storage URL
const s3Config = config.s3!;
if (s3Config.endpoint && s3Config.endpoint !== 'https://s3.amazonaws.com') {
// Custom S3-compatible endpoint
return `${s3Config.endpoint}/${s3Config.bucket}/${fileId}`;
}
// AWS S3
return `https://${s3Config.bucket}.s3.${s3Config.region}.amazonaws.com/${fileId}`;
default:
throw new Error(`Unsupported storage type: ${storageType}`);
}
}
/**
 * Generate a presigned URL (S3 only).
 * @param fileId file ID
 * @param expiresIn validity period in seconds
 * @returns presigned URL
 */
public async generatePresignedUrl(fileId: string, expiresIn: number = 3600): Promise<string> {
const storageType = this.storageManager.getStorageType();
if (storageType !== StorageType.S3) {
throw new Error('Presigned URLs are only supported for S3 storage');
}
// TODO: implement S3 presigned URL generation
// This requires the AWS SDK's getSignedUrl helper
// const s3Client = this.storageManager.getS3Client();
// return await getSignedUrl(s3Client, new GetObjectCommand({
// Bucket: config.s3!.bucket,
// Key: fileId
// }), { expiresIn });
throw new Error('Presigned URL generation not implemented yet');
}
/**
 * Get the local filesystem path of a file (local storage only).
 * @param fileId file ID
 * @returns file path
 */
public getFilePath(fileId: string): string {
const storageType = this.storageManager.getStorageType();
if (storageType !== StorageType.LOCAL) {
throw new Error('File path is only available for local storage');
}
const config = this.storageManager.getStorageConfig();
return path.join(config.local?.directory || './uploads', fileId);
}
/**
 * Check whether a file exists.
 * @param fileId file ID
 * @returns whether an upload record exists for the file
 */
public async fileExists(fileId: string): Promise<boolean> {
const storageType = this.storageManager.getStorageType();
const dataStore = this.storageManager.getDataStore();
try {
await dataStore.getUpload(fileId);
return true;
} catch (error) {
return false;
}
}
/**
 * Delete a file.
 * @param fileId file ID
 */
public async deleteFile(fileId: string): Promise<void> {
const dataStore = this.storageManager.getDataStore();
await dataStore.remove(fileId);
}
/**
 * Get a readable stream for a file.
 * @param fileId file ID
 * @returns readable stream
 */
public async getFileStream(fileId: string): Promise<ReadableStream | NodeJS.ReadableStream> {
const storageType = this.storageManager.getStorageType();
const dataStore = this.storageManager.getDataStore();
if (storageType === StorageType.LOCAL) {
// Local storage returns the file stream directly
return (dataStore as any).read(fileId);
} else if (storageType === StorageType.S3) {
// S3 storage returns the object stream
return (dataStore as any).read(fileId);
}
throw new Error(`File stream not supported for storage type: ${storageType}`);
}
/**
 * Copy a file.
 * @param sourceFileId source file ID
 * @param targetFileId target file ID
 */
public async copyFile(sourceFileId: string, targetFileId: string): Promise<void> {
const storageType = this.storageManager.getStorageType();
if (storageType === StorageType.LOCAL) {
// Local storage copies via the filesystem
const fs = await import('fs/promises');
const sourcePath = this.getFilePath(sourceFileId);
const targetPath = this.getFilePath(targetFileId);
await fs.copyFile(sourcePath, targetPath);
} else if (storageType === StorageType.S3) {
// S3 storage would use object copy
// TODO: implement S3 object copy
throw new Error('S3 file copy not implemented yet');
}
}
/**
 * Get storage statistics.
 */
public async getStorageStats(): Promise<{
storageType: StorageType;
totalFiles?: number;
totalSize?: number;
availableSpace?: number;
}> {
const storageType = this.storageManager.getStorageType();
const config = this.storageManager.getStorageConfig();
const stats = {
storageType,
totalFiles: undefined as number | undefined,
totalSize: undefined as number | undefined,
availableSpace: undefined as number | undefined,
};
if (storageType === StorageType.LOCAL) {
// For local storage, disk usage can be inspected
try {
const fs = await import('fs/promises');
const path = await import('path');
const uploadDir = config.local?.directory || './uploads';
// Compute total file count and size (simplified here)
// A real implementation would likely walk the directory recursively
const stat = await fs.stat(uploadDir).catch(() => null);
if (stat) {
// TODO: implement detailed directory statistics
}
} catch (error) {
console.error('Failed to get local storage stats:', error);
}
}
return stats;
}
}

View File

@ -0,0 +1,153 @@
import { Server, Upload } from '@repo/tus';
import { prisma } from '@repo/db';
import { getFilenameWithoutExt } from '../utils/file';
import { nanoid } from 'nanoid-cjs';
import { slugify } from 'transliteration';
import { StorageManager } from './storage.adapter';
const FILE_UPLOAD_CONFIG = {
maxSizeBytes: 20_000_000_000, // 20GB
};
export enum QueueJobType {
UPDATE_STATS = 'update_stats',
FILE_PROCESS = 'file_process',
UPDATE_POST_VISIT_COUNT = 'updatePostVisitCount',
UPDATE_POST_STATE = 'updatePostState',
}
export enum ResourceStatus {
UPLOADING = 'UPLOADING',
UPLOADED = 'UPLOADED',
PROCESS_PENDING = 'PROCESS_PENDING',
PROCESSING = 'PROCESSING',
PROCESSED = 'PROCESSED',
PROCESS_FAILED = 'PROCESS_FAILED',
}
// Global TUS server instance
let tusServer: Server | null = null;
function getFileId(uploadId: string) {
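// Strip the trailing filename segment, e.g. 2025/05/28/<id>/file.png -> 2025/05/28/<id>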
return uploadId.replace(/\/[^/]+$/, '');
}
async function handleUploadCreate(req: any, res: any, upload: Upload, url: string) {
try {
const fileId = getFileId(upload.id);
const storageManager = StorageManager.getInstance();
await prisma.resource.create({
data: {
title: getFilenameWithoutExt(upload.metadata?.filename || 'untitled'),
fileId, // upload.id with the trailing filename removed
url: upload.id,
meta: upload.metadata,
status: ResourceStatus.UPLOADING,
storageType: storageManager.getStorageType(), // record the storage type
},
});
console.log(`Resource created for ${upload.id} using ${storageManager.getStorageType()} storage`);
} catch (error) {
console.error('Failed to create resource during upload', error);
}
}
async function handleUploadFinish(req: any, res: any, upload: Upload) {
try {
const resource = await prisma.resource.update({
where: { fileId: getFileId(upload.id) },
data: { status: ResourceStatus.UPLOADED },
});
// TODO: queue-based post-processing could be added here
// fileQueue.add(QueueJobType.FILE_PROCESS, { resource }, { jobId: resource.id });
console.log(`Upload finished ${resource.url} using ${StorageManager.getInstance().getStorageType()} storage`);
} catch (error) {
console.error('Failed to update resource after upload', error);
}
}
function initializeTusServer() {
if (tusServer) {
return tusServer;
}
// Get the storage manager instance
const storageManager = StorageManager.getInstance();
const dataStore = storageManager.getDataStore();
tusServer = new Server({
namingFunction(req, metadata) {
const safeFilename = slugify(metadata?.filename || 'untitled');
const now = new Date();
const year = now.getFullYear();
const month = String(now.getMonth() + 1).padStart(2, '0');
const day = String(now.getDate()).padStart(2, '0');
const uniqueId = nanoid(10);
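// Resulting key, e.g. 2025/05/28/aB3xYz9QwE/my-file.png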
return `${year}/${month}/${day}/${uniqueId}/${safeFilename}`;
},
path: '/upload',
datastore: dataStore, // use the storage adapter
maxSize: FILE_UPLOAD_CONFIG.maxSizeBytes,
postReceiveInterval: 1000,
getFileIdFromRequest: (req, lastPath) => {
const match = req.url?.match(/\/upload\/(.+)/);
return match ? match[1] : lastPath;
},
});
// Register event handlers
tusServer.on('POST_CREATE', handleUploadCreate);
tusServer.on('POST_FINISH', handleUploadFinish);
console.log(`TUS server initialized with ${storageManager.getStorageType()} storage`);
return tusServer;
}
export function getTusServer() {
return initializeTusServer();
}
export async function handleTusRequest(req: any, res: any) {
const server = getTusServer();
return server.handle(req, res);
}
export async function cleanupExpiredUploads() {
try {
const storageManager = StorageManager.getInstance();
// Expiration window for incomplete uploads (24 hours)
const expirationPeriod: number = 24 * 60 * 60 * 1000;
// Delete incomplete uploads older than expiration period
const deletedResources = await prisma.resource.deleteMany({
where: {
createdAt: {
lt: new Date(Date.now() - expirationPeriod),
},
status: ResourceStatus.UPLOADING,
},
});
const server = getTusServer();
const expiredUploadCount = await server.cleanUpExpiredUploads();
console.log(
`Cleanup complete: ${deletedResources.count} resources and ${expiredUploadCount} uploads removed from ${storageManager.getStorageType()} storage`,
);
return { deletedResources: deletedResources.count, expiredUploads: expiredUploadCount };
} catch (error) {
console.error('Expired uploads cleanup failed', error);
throw error;
}
}
// Get storage info
export function getStorageInfo() {
const storageManager = StorageManager.getInstance();
return storageManager.getStorageInfo();
}

View File

@ -0,0 +1,29 @@
export interface UploadCompleteEvent {
identifier: string;
filename: string;
size: number;
hash: string;
integrityVerified: boolean;
}
export type UploadEvent = {
uploadStart: {
identifier: string;
filename: string;
totalSize: number;
resuming?: boolean;
};
uploadComplete: UploadCompleteEvent;
uploadError: { identifier: string; error: string; filename: string };
};
export interface UploadLock {
clientId: string;
timestamp: number;
}
// Add a retry mechanism to handle transient network issues
// Periodically clean up expired temporary files
// Add file integrity verification
// Persist upload progress so uploads can resume after a service restart
// Add concurrency limits to avoid exhausting system resources
// Deduplicate files to avoid repeated uploads
// Add logging and monitoring

View File

@ -0,0 +1,116 @@
import { prisma } from '@repo/db';
import type { Resource } from '@repo/db';
import { StorageType } from './storage.adapter';
export async function getResourceByFileId(fileId: string): Promise<{ status: string; resource?: Resource }> {
const resource = await prisma.resource.findFirst({
where: { fileId },
});
if (!resource) {
return { status: 'pending' };
}
return { status: 'ready', resource };
}
export async function getAllResources(): Promise<Resource[]> {
return prisma.resource.findMany({
orderBy: { createdAt: 'desc' },
});
}
export async function getResourcesByStorageType(storageType: StorageType): Promise<Resource[]> {
return prisma.resource.findMany({
where: {
storageType: storageType,
},
orderBy: { createdAt: 'desc' },
});
}
export async function getResourcesByStatus(status: string): Promise<Resource[]> {
return prisma.resource.findMany({
where: { status },
orderBy: { createdAt: 'desc' },
});
}
export async function getUploadingResources(): Promise<Resource[]> {
return prisma.resource.findMany({
where: {
status: 'UPLOADING',
},
orderBy: { createdAt: 'desc' },
});
}
export async function getResourceStats(): Promise<{
total: number;
byStatus: Record<string, number>;
byStorageType: Record<string, number>;
}> {
const [total, statusStats, storageStats] = await Promise.all([
prisma.resource.count(),
prisma.resource.groupBy({
by: ['status'],
_count: true,
}),
prisma.resource.groupBy({
by: ['storageType'],
_count: true,
}),
]);
const byStatus = statusStats.reduce(
(acc, item) => {
acc[item.status || 'unknown'] = item._count || 0;
return acc;
},
{} as Record<string, number>,
);
const byStorageType = storageStats.reduce(
(acc, item) => {
const key = (item.storageType as string) || 'unknown';
acc[key] = item._count;
return acc;
},
{} as Record<string, number>,
);
return {
total,
byStatus,
byStorageType,
};
}
export async function deleteResource(id: string): Promise<Resource> {
return prisma.resource.delete({
where: { id },
});
}
export async function updateResource(id: string, data: any): Promise<Resource> {
return prisma.resource.update({
where: { id },
data,
});
}
export async function migrateResourcesStorageType(
fromStorageType: StorageType,
toStorageType: StorageType,
): Promise<{ count: number }> {
const result = await prisma.resource.updateMany({
where: {
storageType: fromStorageType,
},
data: {
storageType: toStorageType,
},
});
return { count: result.count };
}

View File

@ -0,0 +1,198 @@
import { Hono } from 'hono';
import { handleTusRequest, cleanupExpiredUploads, getStorageInfo } from './tus';
import {
getResourceByFileId,
getAllResources,
deleteResource,
updateResource,
getResourcesByStorageType,
getResourcesByStatus,
getUploadingResources,
getResourceStats,
migrateResourcesStorageType,
} from './upload.index';
import { StorageManager, StorageType, type StorageConfig } from './storage.adapter';
import { prisma } from '@repo/db';
const uploadRest = new Hono();
// Get a file's resource record
uploadRest.get('/resource/:fileId', async (c) => {
const fileId = c.req.param('fileId');
const result = await getResourceByFileId(fileId);
return c.json(result);
});
// List all resources
uploadRest.get('/resources', async (c) => {
const resources = await getAllResources();
return c.json(resources);
});
// List resources by storage type
uploadRest.get('/resources/storage/:storageType', async (c) => {
const storageType = c.req.param('storageType') as StorageType;
const resources = await getResourcesByStorageType(storageType);
return c.json(resources);
});
// List resources by status
uploadRest.get('/resources/status/:status', async (c) => {
const status = c.req.param('status');
const resources = await getResourcesByStatus(status);
return c.json(resources);
});
// List resources that are currently uploading
uploadRest.get('/resources/uploading', async (c) => {
const resources = await getUploadingResources();
return c.json(resources);
});
// Get resource statistics
uploadRest.get('/stats', async (c) => {
const stats = await getResourceStats();
return c.json(stats);
});
// Delete a resource
uploadRest.delete('/resource/:id', async (c) => {
const id = c.req.param('id');
const result = await deleteResource(id);
return c.json(result);
});
// Update a resource
uploadRest.patch('/resource/:id', async (c) => {
const id = c.req.param('id');
const data = await c.req.json();
const result = await updateResource(id, data);
return c.json(result);
});
// Migrate resource storage type (bulk-update the storage-type markers in the database)
uploadRest.post('/migrate-storage', async (c) => {
try {
const { from, to } = await c.req.json();
const result = await migrateResourcesStorageType(from as StorageType, to as StorageType);
return c.json({
success: true,
message: `Migrated ${result.count} resources from ${from} to ${to}`,
count: result.count,
});
} catch (error) {
console.error('Failed to migrate storage type:', error);
return c.json(
{
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
},
400,
);
}
});
// Clean up expired uploads
uploadRest.post('/cleanup', async (c) => {
const result = await cleanupExpiredUploads();
return c.json(result);
});
// Manually clean up resources with a given status
uploadRest.post('/cleanup/by-status', async (c) => {
try {
const { status, olderThanDays } = await c.req.json();
const cutoffDate = new Date();
cutoffDate.setDate(cutoffDate.getDate() - (olderThanDays || 30));
const deletedResources = await prisma.resource.deleteMany({
where: {
status,
createdAt: {
lt: cutoffDate,
},
},
});
return c.json({
success: true,
message: `Deleted ${deletedResources.count} resources with status ${status}`,
count: deletedResources.count,
});
} catch (error) {
console.error('Failed to cleanup by status:', error);
return c.json(
{
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
},
400,
);
}
});
// Get storage info
uploadRest.get('/storage/info', async (c) => {
const storageInfo = getStorageInfo();
return c.json(storageInfo);
});
// Switch the storage type (restart required)
uploadRest.post('/storage/switch', async (c) => {
try {
const newConfig = (await c.req.json()) as StorageConfig;
const storageManager = StorageManager.getInstance();
await storageManager.switchStorage(newConfig);
return c.json({
success: true,
message: 'Storage configuration updated. Please restart the application for changes to take effect.',
newType: newConfig.type,
});
} catch (error) {
console.error('Failed to switch storage:', error);
return c.json(
{
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
},
400,
);
}
});
// Validate a storage configuration
uploadRest.post('/storage/validate', async (c) => {
try {
const config = (await c.req.json()) as StorageConfig;
const { validateStorageConfig } = await import('./storage.adapter');
const errors = validateStorageConfig(config);
if (errors.length > 0) {
return c.json({ valid: false, errors }, 400);
}
return c.json({ valid: true, message: 'Storage configuration is valid' });
} catch (error) {
return c.json(
{
valid: false,
errors: [error instanceof Error ? error.message : 'Invalid JSON'],
},
400,
);
}
});
// TUS protocol handling - catch-all handler
uploadRest.all('/*', async (c) => {
try {
await handleTusRequest(c.req.raw, c.res);
return new Response(null);
} catch (error) {
console.error('TUS request error:', error);
return c.json({ error: 'Upload request failed' }, 500);
}
});
export default uploadRest;

View File

@ -0,0 +1,4 @@
export function extractFileIdFromNginxUrl(url: string) {
const match = url.match(/uploads\/(\d{4}\/\d{2}\/\d{2}\/[^/]+)/);
return match ? match[1] : '';
}
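
A usage sketch for the extractor above (the URL is a made-up example):

```typescript
// The regex captures the yyyy/mm/dd/<uniqueId> portion of an uploads URL.
extractFileIdFromNginxUrl('https://cdn.example.com/uploads/2025/05/28/Ab3xYz/file.png');
// => '2025/05/28/Ab3xYz'
```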

View File

@ -0,0 +1,67 @@
import { createHash } from 'crypto';
import { createReadStream } from 'fs';
import path from 'path';
import * as dotenv from 'dotenv';
import dayjs from 'dayjs';
dotenv.config();
export function getFilenameWithoutExt(filename: string | null | undefined) {
return filename ? filename.replace(/\.[^/.]+$/, '') : filename || dayjs().format('YYYYMMDDHHmmss');
}
/**
 * Compute the SHA-256 hash of a file.
 * @param filePath path to the file
 * @returns Promise<string> resolving to the hex-encoded hash
 */
export async function calculateFileHash(filePath: string): Promise<string> {
return new Promise((resolve, reject) => {
// Create a SHA-256 hash object
const hash = createHash('sha256');
// Create a file read stream
const readStream = createReadStream(filePath);
// Handle read errors
readStream.on('error', (error) => {
reject(new Error(`Failed to read file: ${error.message}`));
});
// Handle hash computation errors
hash.on('error', (error) => {
reject(new Error(`Failed to calculate hash: ${error.message}`));
});
// Stream the file contents through the hash
readStream
.pipe(hash)
.on('finish', () => {
// Read the final hash value (hex-encoded)
const fileHash = hash.digest('hex');
resolve(fileHash);
})
.on('error', (error) => {
reject(new Error(`Hash calculation failed: ${error.message}`));
});
});
}
/**
 * Compute the SHA-256 hash of a Buffer.
 * @param buffer the Buffer to hash
 * @returns string hex-encoded hash of the Buffer
 */
export function calculateBufferHash(buffer: Buffer): string {
const hash = createHash('sha256');
hash.update(buffer);
return hash.digest('hex');
}
/**
 * Compute the SHA-256 hash of a string.
 * @param content the string to hash
 * @returns string hex-encoded hash
 */
export function calculateStringHash(content: string): string {
const hash = createHash('sha256');
hash.update(content);
return hash.digest('hex');
}
export const getUploadFilePath = (fileId: string): string => {
const uploadDirectory = process.env.UPLOAD_DIR || '';
return path.join(uploadDirectory, fileId);
};

View File

@ -1,11 +1,10 @@
{
"extends": "@repo/typescript-config/hono.json",
"compilerOptions": {
"moduleResolution": "bundler",
"paths": {
"@/*": ["./*"],
"@repo/db/*": ["../../packages/db/src/*"],
},
}
}
"extends": "@repo/typescript-config/hono.json",
"compilerOptions": {
"moduleResolution": "bundler",
"paths": {
"@/*": ["./*"],
"@repo/db/*": ["../../packages/db/src/*"]
}
}
}

View File

@ -1 +1 @@
DATABASE_URL="postgresql://root:Letusdoit000@localhost:5432/app?schema=public"
DATABASE_URL="postgresql://nice:Letusdoit123@localhost:5432/db?schema=public"

View File

@ -0,0 +1,172 @@
'use client';
import { useHello, useTRPC, useWebSocket, MessageType } from '@repo/client';
import { useQuery } from '@tanstack/react-query';
import { useRef, useState, useEffect } from 'react';
export default function WebSocketPage() {
const trpc = useTRPC();
const [message, setMessage] = useState('');
const [roomId, setRoomId] = useState('');
const messagesEndRef = useRef<HTMLDivElement>(null);
// WebSocket hook
const { messages, currentRoom, connecting, joinRoom, leaveRoom, sendMessage } = useWebSocket();
// Scroll to the bottom
const scrollToBottom = () => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
};
// Auto-scroll when messages update
useEffect(() => {
scrollToBottom();
}, [messages]);
const handleJoinRoom = async () => {
const success = await joinRoom(roomId.trim());
if (success) {
setRoomId('');
}
};
const handleLeaveRoom = async () => {
await leaveRoom();
};
const handleSendMessage = async () => {
const success = await sendMessage({
text: message,
type: MessageType.TEXT,
});
if (success) {
setMessage('');
}
};
// Send the message on Enter
const handleKeyPress = (e: React.KeyboardEvent) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
handleSendMessage();
}
};
// Handle Enter in the room ID input
const handleRoomKeyPress = (e: React.KeyboardEvent) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
handleJoinRoom();
}
};
return (
<div className="p-4">
<h1 className="text-2xl font-bold mb-4">WebSocket Chat</h1>
{/* Room management */}
<div className="mb-6 p-4 border rounded">
<h2 className="text-xl mb-2">Room Management</h2>
{!currentRoom ? (
<div className="flex gap-2">
<input
type="text"
value={roomId}
onChange={(e) => setRoomId(e.target.value)}
onKeyPress={handleRoomKeyPress}
disabled={connecting}
className="border border-gray-300 rounded px-3 py-2 flex-1"
placeholder="Enter a room ID..."
/>
<button
onClick={handleJoinRoom}
disabled={connecting}
className={`px-4 py-2 rounded text-white ${
connecting ? 'bg-gray-400 cursor-not-allowed' : 'bg-green-500 hover:bg-green-600'
}`}
>
{connecting ? 'Connecting...' : 'Join Room'}
</button>
</div>
) : (
<div className="flex gap-2 items-center">
<span>Current room: {currentRoom}</span>
<button
onClick={handleLeaveRoom}
disabled={connecting}
className={`px-4 py-2 rounded text-white ${
connecting ? 'bg-gray-400 cursor-not-allowed' : 'bg-red-500 hover:bg-red-600'
}`}
>
Leave Room
</button>
</div>
)}
</div>
{/* Message area */}
{currentRoom && (
<div className="mb-4">
<div className="border rounded p-4 h-[400px] overflow-y-auto mb-4 bg-gray-50">
{messages.map((msg, index) => (
<div
key={index}
className={`mb-2 p-2 rounded ${
msg.action === 'system'
? 'bg-gray-200 text-gray-700'
: msg.action === 'error'
? 'bg-red-100 text-red-700'
: 'bg-blue-100 text-blue-700'
}`}
>
<div className="flex items-center gap-2">
{msg.data.type === MessageType.IMAGE && msg.data.fileUri && (
<img src={msg.data.fileUri} alt="image message" className="max-w-xs" />
)}
{msg.data.type === MessageType.FILE && msg.data.fileUri && (
<a
href={msg.data.fileUri}
target="_blank"
rel="noopener noreferrer"
className="text-blue-600 hover:underline"
>
Download file
</a>
)}
{msg.data.text && <div>{msg.data.text}</div>}
</div>
</div>
))}
<div ref={messagesEndRef} />
</div>
<div className="flex gap-2">
<input
type="text"
value={message}
onChange={(e) => setMessage(e.target.value)}
onKeyPress={handleKeyPress}
disabled={connecting}
className="border border-gray-300 rounded px-3 py-2 flex-1"
placeholder="Type a message..."
/>
<button
onClick={handleSendMessage}
disabled={connecting}
className={`px-4 py-2 rounded text-white ${
connecting ? 'bg-gray-400 cursor-not-allowed' : 'bg-blue-500 hover:bg-blue-600'
}`}
>
Send
</button>
</div>
</div>
)}
{!currentRoom && (
<div>
<p className="text-gray-600">Tip: join a room to start chatting</p>
<p className="text-gray-600">Open multiple pages with the same room ID to test in-room messaging</p>
</div>
)}
</div>
);
}

View File

@ -0,0 +1,122 @@
import { useState } from "react";
import * as tus from "tus-js-client";
import { env } from "../env";
import { getCompressedImageUrl } from "@nice/utils";
interface UploadResult {
compressedUrl: string;
url: string;
fileId: string;
fileName: string;
}
export function useTusUpload() {
const [uploadProgress, setUploadProgress] = useState<
Record<string, number>
>({});
const [isUploading, setIsUploading] = useState(false);
const [uploadError, setUploadError] = useState<string | null>(null);
const getFileId = (url: string) => {
const parts = url.split("/");
const uploadIndex = parts.findIndex((part) => part === "upload");
if (uploadIndex === -1 || uploadIndex + 4 >= parts.length) {
throw new Error("Invalid upload URL format");
}
return parts.slice(uploadIndex + 1, uploadIndex + 5).join("/");
};
const getResourceUrl = (url: string) => {
const parts = url.split("/");
const uploadIndex = parts.findIndex((part) => part === "upload");
if (uploadIndex === -1 || uploadIndex + 4 >= parts.length) {
throw new Error("Invalid upload URL format");
}
const resUrl = `http://${env.SERVER_IP}:${env.FILE_PORT}/uploads/${parts.slice(uploadIndex + 1, uploadIndex + 6).join("/")}`;
return resUrl;
};
const handleFileUpload = async (
file: File | Blob,
onSuccess: (result: UploadResult) => void,
onError: (error: Error) => void,
fileKey: string // unique key identifying this file
) => {
// console.log()
setIsUploading(true);
setUploadProgress((prev) => ({ ...prev, [fileKey]: 0 }));
setUploadError(null);
try {
// If the input is a Blob, convert it to a File
let fileName = "uploaded-file";
if (file instanceof Blob && !(file instanceof File)) {
// Derive the file extension from the MIME type
const extension = file.type.split('/')[1];
fileName = `uploaded-file.${extension}`;
}
const uploadFile = file instanceof Blob && !(file instanceof File)
? new File([file], fileName, { type: file.type })
: file as File;
console.log(`http://${env.SERVER_IP}:${env.SERVER_PORT}/upload`);
const upload = new tus.Upload(uploadFile, {
endpoint: `http://${env.SERVER_IP}:${env.SERVER_PORT}/upload`,
retryDelays: [0, 1000, 3000, 5000],
metadata: {
filename: uploadFile.name,
filetype: uploadFile.type,
size: String(uploadFile.size), // tus metadata values must be strings
},
onProgress: (bytesUploaded, bytesTotal) => {
const progress = Number(
((bytesUploaded / bytesTotal) * 100).toFixed(2)
);
setUploadProgress((prev) => ({
...prev,
[fileKey]: progress,
}));
},
onSuccess: async (payload) => {
if (upload.url) {
const fileId = getFileId(upload.url);
//console.log(fileId)
const url = getResourceUrl(upload.url);
setIsUploading(false);
setUploadProgress((prev) => ({
...prev,
[fileKey]: 100,
}));
onSuccess({
compressedUrl: getCompressedImageUrl(url),
url,
fileId,
fileName: uploadFile.name,
});
}
},
onError: (error) => {
const err =
error instanceof Error
? error
: new Error("Unknown error");
setIsUploading(false);
setUploadError(error.message);
console.log(error);
onError(err);
},
});
upload.start();
} catch (error) {
const err =
error instanceof Error ? error : new Error("Upload failed");
setIsUploading(false);
setUploadError(err.message);
onError(err);
}
};
return {
uploadProgress,
isUploading,
uploadError,
handleFileUpload,
};
}
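
A hypothetical component wiring the hook up (the component name, import path, and callbacks are illustrative, not part of this commit):

```tsx
import React from 'react';
import { useTusUpload } from './useTusUpload'; // assumed relative import

// Illustrative usage of the hook; the component itself is a made-up example.
function FileUploader() {
  const { handleFileUpload, isUploading } = useTusUpload();

  const onChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const file = e.target.files?.[0];
    if (!file) return;
    handleFileUpload(
      file,
      (result) => console.log('uploaded:', result.url),
      (err) => console.error(err),
      file.name, // fileKey used to track per-file progress
    );
  };

  return <input type="file" onChange={onChange} disabled={isUploading} />;
}
```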

View File

@ -3,21 +3,18 @@ services:
image: postgres:${POSTGRES_VERSION:-16}
container_name: postgres
ports:
- "5432:5432"
- '5432:5432'
environment:
- POSTGRES_DB=${POSTGRES_DB:-nice_db}
- POSTGRES_USER=${POSTGRES_USER:-nice_user}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-nice_password}
volumes:
- type: volume
source: postgres_data
target: /var/lib/postgresql/data
- ./volumes/postgres:/var/lib/postgresql/data
# - type: volume
# source: postgres_data
# target: /var/lib/postgresql/data
healthcheck:
test:
[
"CMD-SHELL",
"sh -c 'pg_isready -U ${POSTGRES_USER:-nice_user} -d ${POSTGRES_DB:-nice_db}'",
]
test: ['CMD-SHELL', "sh -c 'pg_isready -U ${POSTGRES_USER:-nice_user} -d ${POSTGRES_DB:-nice_db}'"]
interval: 10s
timeout: 3s
retries: 3

View File

@ -4,19 +4,23 @@ services:
container_name: elasticsearch
restart: always
ports:
- "9200:9200"
- "9300:9300"
- '9200:9200'
- '9300:9300'
networks:
- nice-net
environment:
- discovery.type=single-node
- xpack.security.enabled=true
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD:-nice_elastic_password}
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- 'ES_JAVA_OPTS=-Xms512m -Xmx512m'
volumes:
- type: volume
source: elasticsearch_data
target: /usr/share/elasticsearch/data
- type: tmpfs
target: /tmp
tmpfs:
size: 1G
healthcheck:
test: curl -s http://localhost:9200/_cluster/health | grep -v '"status":"red"'
interval: 10s
@ -29,4 +33,4 @@ volumes:
driver_opts:
type: none
device: ${PWD}/volumes/elasticsearch
o: bind
o: bind

View File

@ -1,23 +1,21 @@
services:
minio:
image: minio/minio:${MINIO_VERSION:-RELEASE.2024-04-22T22-12-26Z}
image: minio/minio:${MINIO_VERSION:-RELEASE.2024-04-18T19-09-19Z}
container_name: minio
restart: always
ports:
- "9000:9000"
- "9001:9001"
- '9000:9000'
- '9001:9001'
networks:
- nice-net
environment:
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-nice_minio_access}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-nice_minio_secret}
volumes:
- type: volume
source: storage_data
target: /data
- ./volumes/minio:/data
command: server /data --console-address ":9001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 20s
retries: 3

View File

@ -1,7 +1,14 @@
import { api } from '../trpc';
import { useQuery } from '@tanstack/react-query';
import { useTRPC } from '../trpc';
export function useHello() {
const trpc = useTRPC();
// trpc.user.getUser.queryOptions();
const getUser = useQuery(trpc.user.getUser.queryOptions());
console.log(getUser);
// useQuery;
return {
hello: api.hello,
getUser: getUser,
// hello: api.hello,
};
}

View File

@ -1 +1,3 @@
export * from './api';
export * from './websocket';
export { MessageType } from './websocket/type';

View File

@ -0,0 +1,192 @@
// WebSocket message interfaces
import { WSMessage, WSMessageParams, MessageType } from './type';
export class WebSocketClient {
private ws: WebSocket | null = null;
private url: string;
private currentRoom: string | null = null;
private messageHandler: ((message: WSMessage) => void) | null = null;
private connectionPromise: Promise<void> | null = null;
constructor(url: string = 'ws://localhost:3000/ws') {
this.url = url;
}
private async ensureConnected(): Promise<void> {
if (this.ws?.readyState === WebSocket.OPEN) {
return;
}
if (this.connectionPromise) {
return this.connectionPromise;
}
this.connectionPromise = new Promise((resolve, reject) => {
this.disconnect(); // make sure any old connection is closed first
this.ws = new WebSocket(this.url);
this.ws.onopen = () => {
console.log('WebSocket connection established');
this.connectionPromise = null;
resolve();
};
this.ws.onmessage = (event) => {
try {
const message = JSON.parse(event.data) as WSMessage;
// Handle system and error messages, plus all messages for the current room
if (
message.action === 'system' ||
message.action === 'error' ||
(message.action === 'message' && message.roomId === this.currentRoom)
) {
console.log('Message received:', message);
// Invoke the message handler
if (this.messageHandler) {
this.messageHandler(message);
}
}
} catch (error) {
console.error('Failed to parse message:', error);
}
};
this.ws.onclose = () => {
console.log('WebSocket connection closed');
this.cleanup();
reject(new Error('WebSocket connection closed'));
};
this.ws.onerror = (error) => {
console.error('WebSocket error:', error);
this.cleanup();
reject(error);
};
// 5-second connection timeout
setTimeout(() => {
if (this.ws?.readyState !== WebSocket.OPEN) {
this.cleanup();
reject(new Error('WebSocket connection timeout'));
}
}, 5000);
});
return this.connectionPromise;
}
async connect() {
return this.ensureConnected();
}
private cleanup() {
if (this.ws) {
this.ws.onclose = null;
this.ws.onerror = null;
this.ws.onmessage = null;
this.ws.onopen = null;
this.ws = null;
}
this.currentRoom = null;
this.messageHandler = null;
this.connectionPromise = null;
}
// Register a message handler
onMessage(handler: (message: WSMessage) => void) {
this.messageHandler = handler;
return () => {
if (this.messageHandler === handler) {
this.messageHandler = null;
}
};
}
async joinRoom(roomId: string) {
try {
await this.ensureConnected();
// Leave the current room first, if any
if (this.currentRoom) {
await this.leaveRoom();
}
const message: WSMessage = {
action: 'join',
roomId,
data: {
text: `Joining room ${roomId}`,
type: MessageType.TEXT,
},
};
this.ws!.send(JSON.stringify(message));
this.currentRoom = roomId;
} catch (error) {
console.error('Failed to join room:', error);
throw error;
}
}
async leaveRoom() {
if (this.ws?.readyState === WebSocket.OPEN && this.currentRoom) {
const message: WSMessage = {
action: 'leave',
roomId: this.currentRoom,
data: {
text: `Leaving room ${this.currentRoom}`,
type: MessageType.TEXT,
},
};
this.ws.send(JSON.stringify(message));
this.currentRoom = null;
}
}
async sendMessage(messageParams: WSMessageParams) {
try {
await this.ensureConnected();
if (!this.currentRoom) {
throw new Error('Join a room before sending messages');
}
const messageObj: WSMessage = {
action: 'message',
roomId: this.currentRoom,
data: {
text: messageParams.text || '',
type: messageParams.type || MessageType.TEXT,
fileUri: messageParams.fileUri,
},
};
this.ws!.send(JSON.stringify(messageObj));
} catch (error) {
console.error('Failed to send message:', error);
throw error;
}
}
disconnect() {
if (this.currentRoom) {
this.leaveRoom();
}
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
this.ws.close();
}
this.cleanup();
}
getCurrentRoom() {
return this.currentRoom;
}
isConnected() {
return this.ws?.readyState === WebSocket.OPEN;
}
}
// Export a default WebSocket client instance
export const wsClient = new WebSocketClient();

View File

@ -0,0 +1,3 @@
export * from './client';
export * from './useWebSocket';
export * from './type';

View File

@ -0,0 +1,18 @@
export enum MessageType {
TEXT = 'text',
IMAGE = 'image',
FILE = 'file',
RETRACT = 'retract', // used for message recall
}
export interface WSMessageParams {
text?: string;
type?: MessageType;
fileUri?: string;
}
export interface WSMessage {
action: 'join' | 'leave' | 'message' | 'system' | 'error';
roomId?: string;
data: WSMessageParams;
}

View File

@ -0,0 +1,189 @@
import { useCallback, useEffect, useState } from 'react';
import { wsClient } from './client';
import { WSMessage, WSMessageParams, MessageType } from './type';
interface UseWebSocketReturn {
// State
messages: WSMessage[];
currentRoom: string | null;
connecting: boolean;
// Methods
joinRoom: (roomId: string) => Promise<boolean>;
leaveRoom: () => Promise<boolean>;
sendMessage: (messageParams: WSMessageParams) => Promise<boolean>;
// Utilities
clearMessages: () => void;
}
export const useWebSocket = (): UseWebSocketReturn => {
const [messages, setMessages] = useState<WSMessage[]>([]);
const [currentRoom, setCurrentRoom] = useState<string | null>(null);
const [connecting, setConnecting] = useState(false);
// Message handler
const messageHandler = useCallback((message: WSMessage) => {
setMessages((prev) => [...prev, message]);
}, []);
// Initialize the WebSocket connection
useEffect(() => {
const initConnection = async () => {
try {
setConnecting(true);
await wsClient.connect();
const unsubscribe = wsClient.onMessage(messageHandler);
return unsubscribe;
} catch (error) {
console.error('Connection failed:', error);
setMessages((prev) => [
...prev,
{
action: 'error',
data: {
text: 'Connection failed; refresh the page and try again',
type: MessageType.TEXT,
},
},
]);
} finally {
setConnecting(false);
}
};
const unsubscribePromise = initConnection();
return () => {
unsubscribePromise.then((unsubscribe) => unsubscribe?.());
wsClient.disconnect();
};
}, [messageHandler]);
// Join a room
const joinRoom = async (roomId: string): Promise<boolean> => {
// Validate the room ID
if (!roomId.trim()) {
setMessages((prev) => [
...prev,
{
action: 'error',
data: {
text: 'Enter a valid room ID',
type: MessageType.TEXT,
},
},
]);
return false;
}
// Bail out if a connection attempt is in progress
if (connecting) {
return false;
}
try {
setConnecting(true);
await wsClient.joinRoom(roomId.trim());
setCurrentRoom(roomId.trim());
setMessages([]); // clear the message history
return true;
} catch (error) {
console.error('Failed to join room:', error);
setMessages((prev) => [
...prev,
{
action: 'error',
data: {
text: 'Failed to join the room; please retry',
type: MessageType.TEXT,
},
},
]);
return false;
} finally {
setConnecting(false);
}
};
// Leave the room
const leaveRoom = async (): Promise<boolean> => {
if (connecting) {
return false;
}
try {
await wsClient.disconnect();
setCurrentRoom(null);
setMessages([]); // clear the message history
return true;
} catch (error) {
console.error('Failed to leave room:', error);
return false;
}
};
// Send a message
const sendMessage = async (messageParams: WSMessageParams): Promise<boolean> => {
// Validate the message content
if (!messageParams.text?.trim() && !messageParams.fileUri) {
return false;
}
// Require a joined room
if (!currentRoom) {
setMessages((prev) => [
...prev,
{
action: 'error',
data: {
text: 'Join a room first',
type: MessageType.TEXT,
},
},
]);
return false;
}
// Bail out while a connection attempt is in progress
if (connecting) {
return false;
}
try {
await wsClient.sendMessage(messageParams);
return true;
} catch (error) {
console.error('Failed to send message:', error);
setMessages((prev) => [
...prev,
{
action: 'error',
data: {
text: 'Failed to send the message; please retry',
type: MessageType.TEXT,
},
},
]);
return false;
}
};
// Clear all messages
const clearMessages = useCallback(() => {
setMessages([]);
}, []);
return {
// State
messages,
currentRoom,
connecting,
// Methods
joinRoom,
leaveRoom,
sendMessage,
// Utilities
clearMessages,
};
};

View File

@ -20,6 +20,6 @@
"@repo/backend/*": ["../../apps/backend/src/*"]
}
},
"include": ["src"],
"include": ["src", "../../apps/web/hooks/useTusUpload.ts"],
"exclude": ["node_modules", "dist"]
}

View File

@ -1 +1 @@
DATABASE_URL="postgresql://root:Letusdoit000@localhost:5432/app?schema=public"
DATABASE_URL="postgresql://nice:Letusdoit123@localhost:5432/db?schema=public"

View File

@ -0,0 +1,28 @@
-- CreateTable
CREATE TABLE "resource" (
"id" TEXT NOT NULL,
"title" TEXT,
"description" TEXT,
"type" TEXT,
"fileId" TEXT,
"url" TEXT,
"meta" JSONB,
"status" TEXT,
"created_at" TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP,
"updated_at" TIMESTAMP(3),
"created_by" TEXT,
"updated_by" TEXT,
"deleted_at" TIMESTAMP(3),
"is_public" BOOLEAN DEFAULT true,
CONSTRAINT "resource_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "resource_fileId_key" ON "resource"("fileId");
-- CreateIndex
CREATE INDEX "resource_type_idx" ON "resource"("type");
-- CreateIndex
CREATE INDEX "resource_created_at_idx" ON "resource"("created_at");

View File

@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "resource" ADD COLUMN "storage_type" TEXT;

View File

@ -112,3 +112,25 @@ model OidcClient {
@@map("oidc_clients")
}
model Resource {
id String @id @default(cuid()) @map("id")
title String? @map("title")
description String? @map("description")
type String? @map("type")
fileId String? @unique
url String?
meta Json? @map("meta")
status String?
createdAt DateTime? @default(now()) @map("created_at")
updatedAt DateTime? @updatedAt @map("updated_at")
createdBy String? @map("created_by")
updatedBy String? @map("updated_by")
deletedAt DateTime? @map("deleted_at")
isPublic Boolean? @default(true) @map("is_public")
storageType String? @map("storage_type")
// Indexes
@@index([type])
@@index([createdAt])
@@map("resource")
}

View File

@ -1,58 +1,59 @@
{
"name": "@repo/icons",
"version": "1.7.0",
"license": "MIT",
"homepage": "https://github.com/teableio/teable",
"publishConfig": {
"access": "public"
},
"repository": {
"type": "git",
"url": "https://github.com/teableio/teable",
"directory": "packages/icons"
},
"main": "./dist/index.js",
"module": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": {
"import": "./dist/index.js",
"require": "./dist/index.js",
"types": "./dist/index.d.ts"
}
},
"files": [
"dist"
],
"scripts": {
"build": "tsc",
"clean": "rimraf ./dist ./build ./tsconfig.tsbuildinfo ./node_modules/.cache",
"dev": "rm -rf dist && tsc --watch",
"test": "echo \"Error: no test specified\"",
"lint": "eslint . --ext .ts,.tsx,.js,.jsx,.cjs,.mjs,.mdx --cache --cache-location ../../.cache/eslint/icons.eslintcache",
"typecheck": "tsc --project ./tsconfig.json --noEmit",
"generate": "rm -rf src/components && node ./scripts/generate.mjs"
},
"peerDependencies": {
"react": "^19.1.0",
"react-dom": "^19.1.0"
},
"devDependencies": {
"@svgr/core": "8.1.0",
"@svgr/plugin-jsx": "8.1.0",
"@svgr/plugin-prettier": "8.1.0",
"@svgr/plugin-svgo": "8.1.0",
"@types/fs-extra": "11.0.4",
"@types/node": "20.9.0",
"@types/react": "18.2.45",
"axios": "1.7.7",
"chalk": "5.3.0",
"dotenv": "16.4.5",
"eslint": "8.57.0",
"figma-js": "1.16.0",
"fs-extra": "11.2.0",
"lodash": "4.17.21",
"rimraf": "5.0.5",
"typescript": "5.4.3"
}
"name": "@repo/icons",
"version": "1.7.0",
"license": "MIT",
"homepage": "https://github.com/teableio/teable",
"publishConfig": {
"access": "public"
},
"repository": {
"type": "git",
"url": "https://github.com/teableio/teable",
"directory": "packages/icons"
},
"main": "./dist/index.js",
"module": "./dist/index.js",
"types": "./dist/index.d.ts",
"exports": {
".": {
"import": "./dist/index.js",
"require": "./dist/index.js",
"types": "./dist/index.d.ts"
}
},
"files": [
"dist"
],
"scripts": {
"build": "tsc",
"clean": "rimraf ./dist ./build ./tsconfig.tsbuildinfo ./node_modules/.cache",
"dev": "rm -rf dist && tsc --watch",
"test": "echo \"Error: no test specified\"",
"lint": "eslint . --ext .ts,.tsx,.js,.jsx,.cjs,.mjs,.mdx --cache --cache-location ../../.cache/eslint/icons.eslintcache",
"typecheck": "tsc --project ./tsconfig.json --noEmit",
"generate": "rm -rf src/components && node ./scripts/generate.mjs"
},
"peerDependencies": {
"react": "^19.1.0",
"react-dom": "^19.1.0"
},
"devDependencies": {
"@svgr/core": "8.1.0",
"@svgr/plugin-jsx": "8.1.0",
"@svgr/plugin-prettier": "8.1.0",
"@svgr/plugin-svgo": "8.1.0",
"@types/fs-extra": "11.0.4",
"@types/node": "20.9.0",
"@types/react": "^19.1.4",
"@types/react-dom": "^19.1.5",
"axios": "1.7.7",
"chalk": "5.3.0",
"dotenv": "16.4.5",
"eslint": "8.57.0",
"figma-js": "1.16.0",
"fs-extra": "11.2.0",
"lodash": "4.17.21",
"rimraf": "5.0.5",
"typescript": "5.4.3"
}
}

View File

@ -1,20 +1,20 @@
{
"$schema": "https://json.schemastore.org/tsconfig",
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"jsx": "react",
"baseUrl": "./src",
"target": "esnext",
"outDir": "./dist",
"rootDir": "./src",
"lib": ["dom", "dom.iterable", "esnext"],
"module": "esnext",
"noEmit": false,
"incremental": true,
"composite": true,
"moduleResolution": "node",
"esModuleInterop": true
},
"exclude": ["**/node_modules", "**/.*/", "dist", "build"],
"include": ["./src"]
"$schema": "https://json.schemastore.org/tsconfig",
"extends": "@repo/typescript-config/base.json",
"compilerOptions": {
"jsx": "react",
"baseUrl": "./src",
"target": "esnext",
"outDir": "./dist",
"rootDir": "./src",
"lib": ["dom", "dom.iterable", "esnext"],
"module": "esnext",
"noEmit": false,
"incremental": true,
"composite": true,
"moduleResolution": "node",
"esModuleInterop": true
},
"exclude": ["**/node_modules", "**/.*/", "dist", "build"],
"include": ["./src"]
}

File diff suppressed because one or more lines are too long

36
packages/tus/package.json Normal file
View File

@ -0,0 +1,36 @@
{
"name": "@repo/tus",
"version": "1.0.0",
"main": "./dist/index.js",
"module": "./dist/index.mjs",
"types": "./dist/index.d.ts",
"private": true,
"scripts": {
"build": "tsup",
"dev": "tsup --watch",
"dev-static": "tsup --no-watch",
"clean": "rimraf dist",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.723.0",
"@shopify/semaphore": "^3.1.0",
"debug": "^4.4.0",
"lodash.throttle": "^4.1.1",
"multistream": "^4.1.0"
},
"devDependencies": {
"@types/debug": "^4.1.12",
"@types/lodash.throttle": "^4.1.9",
"@types/multistream": "^4.1.3",
"@types/node": "^20.3.1",
"concurrently": "^8.0.0",
"ioredis": "^5.4.1",
"rimraf": "^6.0.1",
"should": "^13.2.3",
"ts-node": "^10.9.1",
"tsup": "^8.3.5",
"typescript": "^5.5.4",
"@redis/client": "^1.6.0"
}
}

View File

@ -0,0 +1,364 @@
import EventEmitter from 'node:events'
import stream from 'node:stream/promises'
import { addAbortSignal, PassThrough } from 'node:stream'
import type http from 'node:http'
import type { ServerOptions } from '../types'
import throttle from 'lodash.throttle'
import { CancellationContext, DataStore, ERRORS, EVENTS, StreamLimiter, Upload } from '../utils'
/**
 * Extracts the upload ID from a URL.
 * Matches the last path segment:
 * - `([^/]+)` captures one or more non-slash characters
 * - `\/?$` allows an optional trailing slash
 *
 * Examples:
 * - `/files/12345` captures `12345`
 * - `/files/12345/` captures `12345`
 */
const reExtractFileID = /([^/]+)\/?$/
/**
 * Extracts the host from an HTTP `forwarded` header.
 * Matches `host="<value>"` or `host=<value>` and captures `<value>`:
 * - `host="?` matches `host=` followed by an optional `"` quote
 * - `([^";]+)` captures everything up to the next quote or semicolon
 *
 * Examples:
 * - `host="example.com"` captures `example.com`
 * - `host=example.com` captures `example.com`
 */
const reForwardedHost = /host="?([^";]+)/
/**
 * Extracts the protocol (`http` or `https`) from an HTTP `forwarded` header.
 * Matches `proto=<value>` and captures `<value>`:
 * - `proto=` matches the literal prefix
 * - `(https?)` captures either `http` or `https`
 *
 * Examples:
 * - `proto=https` captures `https`
 * - `proto=http` captures `http`
 */
const reForwardedProto = /proto=(https?)/
/**
 * BaseHandler is the common base class for the TUS protocol handlers.
 * It extends Node.js EventEmitter so handlers can emit upload lifecycle events.
 */
export class BaseHandler extends EventEmitter {
options: ServerOptions
store: DataStore
/**
 * Creates a BaseHandler.
 * @param store - the data store used to persist uploads
 * @param options - the server configuration options
 * @throws when no store is provided
 */
constructor(store: DataStore, options: ServerOptions) {
super()
if (!store) {
throw new Error('Store must be defined')
}
this.store = store
this.options = options
}
/**
 * Writes an HTTP response.
 * @param res - the HTTP response object
 * @param status - the HTTP status code
 * @param headers - the response headers
 * @param body - the response body
 * @returns the finished response
 */
write(res: http.ServerResponse, status: number, headers = {}, body = '') {
if (status !== 204) {
// @ts-expect-error not explicitly typed but possible
headers['Content-Length'] = Buffer.byteLength(body, 'utf8')
}
res.writeHead(status, headers)
res.write(body)
return res.end()
}
/**
 * Generates the URL for an upload.
 * @param req - the HTTP request object
 * @param id - the upload ID
 * @returns the generated URL
 */
*/
generateUrl(req: http.IncomingMessage, id: string) {
const path = this.options.path === '/' ? '' : this.options.path
if (this.options.generateUrl) {
// Use the user-defined generateUrl function to build the URL
const { proto, host } = this.extractHostAndProto(req)
return this.options.generateUrl(req, {
proto,
host,
path: path,
id,
})
}
// Default implementation
if (this.options.relativeLocation) {
return `${path}/${id}`
}
const { proto, host } = this.extractHostAndProto(req)
return `${proto}://${host}${path}/${id}`
}
/**
 * Extracts the upload ID from the request.
 * @param req - the HTTP request object
 * @returns the upload ID, or undefined if none could be extracted
 */
*/
getFileIdFromRequest(req: http.IncomingMessage) {
const match = reExtractFileID.exec(req.url as string)
if (this.options.getFileIdFromRequest) {
const lastPath = match ? decodeURIComponent(match[1]) : undefined
return this.options.getFileIdFromRequest(req, lastPath)
}
if (!match || this.options.path.includes(match[1])) {
return
}
return decodeURIComponent(match[1])
}
/**
 * Extracts the host and protocol from an HTTP request.
 * When the respectForwardedHeaders option is enabled, the forwarded,
 * x-forwarded-host and x-forwarded-proto headers are consulted first;
 * otherwise the request's host header is used and the protocol defaults to http.
 * @param req - the HTTP request object
 * @returns an object with the host and protocol
 */
*/
protected extractHostAndProto(req: http.IncomingMessage) {
let proto: string | undefined
let host: string | undefined
// If respecting forwarded headers is enabled
if (this.options.respectForwardedHeaders) {
// Read the forwarded header from the request
const forwarded = req.headers.forwarded as string | undefined
if (forwarded) {
// Extract the host and protocol from the forwarded header via the regexes above
host ??= reForwardedHost.exec(forwarded)?.[1]
proto ??= reForwardedProto.exec(forwarded)?.[1]
}
// Read the x-forwarded-host and x-forwarded-proto headers
const forwardHost = req.headers['x-forwarded-host']
const forwardProto = req.headers['x-forwarded-proto']
// Check that x-forwarded-proto is a valid protocol (http or https)
// @ts-expect-error we can pass undefined
if (['http', 'https'].includes(forwardProto)) {
proto ??= forwardProto as string
}
// If x-forwarded-host is present, use it as the host
host ??= forwardHost as string
}
// Fall back to the request's host header when no forwarded host was found
host ??= req.headers.host
// Fall back to http when no forwarded protocol was found
proto ??= 'http'
// Return the host and protocol
return { host: host as string, proto }
}
/**
 * Resolves the locker to use for a request.
 * @param req - the HTTP request object
 * @returns the locker instance
 */
protected async getLocker(req: http.IncomingMessage) {
if (typeof this.options.locker === 'function') {
return this.options.locker(req)
}
return this.options.locker
}
/**
 * Acquires a lock for an upload.
 * @param req - the HTTP request object
 * @param id - the upload ID
 * @param context - the cancellation context
 * @returns the acquired lock
 */
protected async acquireLock(
req: http.IncomingMessage,
id: string,
context: CancellationContext
) {
const locker = await this.getLocker(req)
const lock = locker.newLock(id)
await lock.lock(() => {
context.cancel()
})
return lock
}
/**
 * Streams the HTTP request body into the data store,
 * emitting progress events while bytes are written.
 * @param req - the HTTP request object
 * @param upload - the upload being written (carries the ID and offset)
 * @param maxFileSize - the maximum allowed size
 * @param context - the cancellation context
 * @returns a Promise resolving to the new offset
 */
protected writeToStore(
req: http.IncomingMessage,
upload: Upload,
maxFileSize: number,
context: CancellationContext
) {
// Wrap the async work in a Promise so cancellation and errors are handled in one place.
// biome-ignore lint/suspicious/noAsyncPromiseExecutor: <explanation>
return new Promise<number>(async (resolve, reject) => {
// If the context has already been cancelled, reject immediately.
if (context.signal.aborted) {
reject(ERRORS.ABORTED)
return
}
// Create a PassThrough stream as a proxy for the request stream.
// A PassThrough stream passes data through unmodified; using it as a proxy
// lets the write be aborted without touching the original request stream.
const proxy = new PassThrough()
// Tie the cancellation signal to the proxy so the stream is aborted on cancel.
addAbortSignal(context.signal, proxy)
// Handle errors surfaced by the proxy stream.
proxy.on('error', (err) => {
// Unpipe the request stream from the proxy.
req.unpipe(proxy)
// Map AbortError to ERRORS.ABORTED; propagate anything else unchanged.
reject(err.name === 'AbortError' ? ERRORS.ABORTED : err)
})
// Throttle POST_RECEIVE_V2 so progress events are not emitted on every chunk,
// which would hurt performance.
const postReceive = throttle(
(offset: number) => {
// Emit POST_RECEIVE_V2 with the current upload offset.
this.emit(EVENTS.POST_RECEIVE_V2, req, { ...upload, offset })
},
// Throttle interval, keeping event frequency in check.
this.options.postReceiveInterval,
{ leading: false }
)
// Tracks the current write offset as bytes pass through.
let tempOffset = upload.offset
// On every data chunk, advance the offset and report progress.
proxy.on('data', (chunk: Buffer) => {
tempOffset += chunk.byteLength
postReceive(tempOffset)
})
// Handle errors on the request stream itself.
req.on('error', () => {
// If the proxy is still open, end it gracefully so the bytes received so far
// can be stored as an incompletePart.
if (!proxy.closed) {
proxy.end()
}
})
// Pipe the request through the proxy and a StreamLimiter into the store.
// StreamLimiter caps the number of bytes written so the maximum file size
// cannot be exceeded.
stream
.pipeline(
// Pipe the request stream through the proxy.
req.pipe(proxy),
// Cap the amount of data written.
new StreamLimiter(maxFileSize),
// Write the stream into the storage backend.
async (stream) => {
return this.store.write(stream as StreamLimiter, upload.id, upload.offset)
}
)
// On success, resolve with the number of bytes written.
.then(resolve)
// On failure, reject with the error.
.catch(reject)
})
}
/**
 * Resolves the configured maximum upload size.
 * @param req - the HTTP request object
 * @param id - the upload ID, if known
 * @returns the configured maximum size (0 meaning unlimited)
 */
getConfiguredMaxSize(req: http.IncomingMessage, id: string | null) {
if (typeof this.options.maxSize === 'function') {
return this.options.maxSize(req, id)
}
return this.options.maxSize ?? 0
}
/**
 * Calculates how many bytes the current request body may contain,
 * based on the upload's size, its offset and the configured maximum.
 * @param req - the HTTP request object
 * @param file - the upload in question
 * @param configuredMaxSize - optional pre-resolved maximum size
 * @returns the maximum allowed body size for this request
 * @throws ERRORS.ERR_SIZE_EXCEEDED when the request would exceed the limit
 */
async calculateMaxBodySize(
req: http.IncomingMessage,
file: Upload,
configuredMaxSize?: number
) {
// Use the server-configured maximum size unless one was explicitly provided.
configuredMaxSize ??= await this.getConfiguredMaxSize(req, file.id)
// Parse the Content-Length header from the request (defaults to 0 when unset).
const length = Number.parseInt(req.headers['content-length'] || '0', 10)
const offset = file.offset
const hasContentLengthSet = req.headers['content-length'] !== undefined
const hasConfiguredMaxSizeSet = configuredMaxSize > 0
if (file.sizeIsDeferred) {
// For uploads with a deferred size, check against the configured maximum
// when the transfer is not chunked.
if (
hasContentLengthSet &&
hasConfiguredMaxSizeSet &&
offset + length > configuredMaxSize
) {
throw ERRORS.ERR_SIZE_EXCEEDED
}
if (hasConfiguredMaxSizeSet) {
return configuredMaxSize - offset
}
return Number.MAX_SAFE_INTEGER
}
// Check that the upload fits within the declared file size (when the size is not deferred).
if (offset + length > (file.size || 0)) {
throw ERRORS.ERR_SIZE_EXCEEDED
}
if (hasContentLengthSet) {
return length
}
return (file.size || 0) - offset
}
}
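
To make the forwarded-header parsing above concrete, here is a small standalone sketch of the two regexes against a typical RFC 7239 header value:

// Same patterns as reForwardedHost / reForwardedProto above.
const host = /host="?([^";]+)/
const proto = /proto=(https?)/
const forwarded = 'proto=https;host="cdn.example.com"'

console.log(host.exec(forwarded)?.[1]) // "cdn.example.com"
console.log(proto.exec(forwarded)?.[1]) // "https"
// With respectForwardedHeaders enabled and path '/files', generateUrl would
// then produce e.g. 'https://cdn.example.com/files/<id>'.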

View File

@ -0,0 +1,64 @@
import { CancellationContext, ERRORS, EVENTS } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
export class DeleteHandler extends BaseHandler {
/**
 * Core method handling a DELETE request.
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param context the cancellation context
 * @returns the HTTP response object
 *
 * Behavior:
 * - deletes the resource addressed by the HTTP DELETE request
 * - uses a lock to keep concurrent operations safe
 * - can refuse to delete uploads that are already finished
 *
 * Design notes:
 * - the lock is always released, even when an error is thrown
 * - POST_TERMINATE is emitted once the response has been written
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
// Extract the upload ID from the request
const id = this.getFileIdFromRequest(req)
// Throw when no upload ID is present
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if configured
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Acquire the upload lock to keep concurrent operations safe
const lock = await this.acquireLock(req, id, context)
try {
// Optionally refuse to terminate uploads that are already finished
if (this.options.disableTerminationForFinishedUploads) {
const upload = await this.store.getUpload(id)
// Throw when the upload is already complete
if (upload.offset === upload.size) {
throw ERRORS.INVALID_TERMINATION
}
}
// Remove the upload from the store
await this.store.remove(id)
} finally {
// Always release the lock, whether or not removal succeeded
await lock.unlock()
}
// Respond with 204 No Content
const writtenRes = this.write(res, 204, {})
// Emit the termination event
this.emit(EVENTS.POST_TERMINATE, req, writtenRes, id)
return writtenRes
}
}

View File

@ -0,0 +1,189 @@
/**
 * GetHandler.ts
 * Handles HTTP GET requests for the tus server,
 * serving completed uploads and any custom routes registered by the application.
 */
import stream from 'node:stream'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
import type { RouteHandler } from '../types'
import { ERRORS, Upload } from '../utils'
/**
 * GetHandler class
 * Serves GET requests: registered custom routes and downloads of completed uploads.
 *
 * Usage:
 * const handler = new GetHandler(store, options)
 * handler.registerPath('/custom', customHandler)
 */
export class GetHandler extends BaseHandler {
// Map of path -> handler, giving O(1) route lookup
paths: Map<string, RouteHandler> = new Map()
/**
 * Validates that a MIME type conforms to RFC 1341,
 * e.g. `text/plain; charset=utf-8`.
 * Runs in O(n) for an input of length n.
 */
reMimeType =
// biome-ignore lint/suspicious/noControlCharactersInRegex: it's fine
/^(?:application|audio|example|font|haptics|image|message|model|multipart|text|video|x-(?:[0-9A-Za-z!#$%&'*+.^_`|~-]+))\/([0-9A-Za-z!#$%&'*+.^_`|~-]+)((?:[ ]*;[ ]*[0-9A-Za-z!#$%&'*+.^_`|~-]+=(?:[0-9A-Za-z!#$%&'*+.^_`|~-]+|"(?:[^"\\]|\\.)*"))*)$/
/**
 * Whitelist of MIME types a browser may render inline.
 * A Set gives O(1) membership checks.
 * Types not on this list are served as attachments.
 */
mimeInlineBrowserWhitelist = new Set([
'text/plain',
'image/png',
'image/jpeg',
'image/gif',
'image/bmp',
'image/webp',
'audio/wave',
'audio/wav',
'audio/x-wav',
'audio/x-pn-wav',
'audio/webm',
'audio/ogg',
'video/mp4',
'video/webm',
'video/ogg',
'application/ogg',
])
/**
 * Registers a custom route handler.
 *
 * Parameters:
 * - path: the request path to match
 * - handler: the handler function to invoke
 * Registration and lookup are O(1).
 */
registerPath(path: string, handler: RouteHandler): void {
this.paths.set(path, handler)
}
/**
 * Handles a GET request.
 *
 * Parameters:
 * - req: the HTTP request object
 * - res: the HTTP response object
 * Returns a writable stream or void.
 * Throws FILE_NOT_FOUND when the upload does not exist or is incomplete.
 * Streaming is O(n) in the file size.
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse
// biome-ignore lint/suspicious/noConfusingVoidType: it's fine
): Promise<stream.Writable | void> {
// Check for a registered custom path handler first
if (this.paths.has(req.url as string)) {
const handler = this.paths.get(req.url as string) as RouteHandler
return handler(req, res)
}
// Make sure the data store supports reads
if (!('read' in this.store)) {
throw ERRORS.FILE_NOT_FOUND
}
// Extract the upload ID from the request
const id = this.getFileIdFromRequest(req)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if configured
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Fetch the upload's current state
const stats = await this.store.getUpload(id)
// Only completed uploads may be downloaded
if (!stats || stats.offset !== stats.size) {
throw ERRORS.FILE_NOT_FOUND
}
// Work out the Content-Type and Content-Disposition headers
const { contentType, contentDisposition } = this.filterContentType(stats)
// Create a read stream for the file
// @ts-expect-error exists if supported
const file_stream = await this.store.read(id)
const headers = {
'Content-Length': stats.offset,
'Content-Type': contentType,
'Content-Disposition': contentDisposition,
}
res.writeHead(200, headers)
// Stream the file to the response
return stream.pipeline(file_stream, res, () => {
// ignore streaming errors
})
}
/**
 * Derives the Content-Type and Content-Disposition headers for an upload.
 *
 * Parameters:
 * - stats: the upload state object
 * Returns an object with contentType and contentDisposition.
 * O(1) apart from the MIME-type validation.
 */
filterContentType(stats: Upload): {
contentType: string
contentDisposition: string
} {
let contentType: string
let contentDisposition: string
// Pull the file type and name from the upload metadata
const { filetype, filename } = stats.metadata ?? {}
// Validate the declared MIME type
if (filetype && this.reMimeType.test(filetype)) {
contentType = filetype
// Render inline only if the type is on the whitelist
if (this.mimeInlineBrowserWhitelist.has(filetype)) {
contentDisposition = 'inline'
} else {
contentDisposition = 'attachment'
}
} else {
// Fall back to a generic type and force a download
contentType = 'application/octet-stream'
contentDisposition = 'attachment'
}
// Append the filename to the Content-Disposition header
if (filename) {
contentDisposition += `; filename=${this.quote(filename)}`
}
return {
contentType,
contentDisposition,
}
}
/**
 * Quotes a string for use in a header value,
 * escaping embedded double quotes.
 *
 * Parameters:
 * - value: the string to escape
 * O(n) in the string length.
 * Used for the filename in Content-Disposition.
 */
quote(value: string) {
return `"${value.replace(/"/g, '\\"')}"`
}
}
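
A short sketch of the custom-route mechanism described above (the '/health' route is made up; `getHandler` stands for an instance constructed with a store and options):

import type http from 'node:http'

declare const getHandler: GetHandler

getHandler.registerPath('/health', (req: http.IncomingMessage, res: http.ServerResponse) => {
  res.writeHead(200, { 'Content-Type': 'text/plain' })
  res.end('ok')
})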

View File

@ -0,0 +1,90 @@
/**
 * HeadHandler
 * Handles HEAD requests of the TUS protocol, reporting an upload's current
 * state so clients can decide where to resume.
 */
import { CancellationContext, ERRORS, Upload, Metadata } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
/**
 * HeadHandler class
 * Handles TUS HEAD requests; extends BaseHandler.
 * Usage:
 * const handler = new HeadHandler(store, options)
 * await handler.send(req, res, context)
 */
export class HeadHandler extends BaseHandler {
/**
 * Core method handling a HEAD request.
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param context the cancellation context
 * @returns the HTTP response
 * @throws ERRORS.FILE_NOT_FOUND when no upload ID can be extracted
 * @throws ERRORS.FILE_NO_LONGER_EXISTS when the upload has expired
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
// Extract the upload ID from the request
const id = this.getFileIdFromRequest(req)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// Run the custom incoming-request hook, if configured
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Acquire the upload lock to guard against concurrent operations
const lock = await this.acquireLock(req, id, context)
let file: Upload
try {
// Fetch the upload's state from the store
file = await this.store.getUpload(id)
} finally {
// Release the lock regardless of the outcome
await lock.unlock()
}
// Check whether the upload has expired
const now = new Date()
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
file.creation_date &&
now > new Date(new Date(file.creation_date).getTime() + this.store.getExpiration())
) {
throw ERRORS.FILE_NO_LONGER_EXISTS
}
// Prevent caching of the response
res.setHeader('Cache-Control', 'no-store')
// Report the current upload offset
res.setHeader('Upload-Offset', file.offset)
// Report size information
if (file.sizeIsDeferred) {
// Size unknown: signal a deferred length
res.setHeader('Upload-Defer-Length', '1')
} else {
// Size known: report it
res.setHeader('Upload-Length', file.size as number)
}
// Report metadata, if any
if (file.metadata !== undefined) {
// Serialize the metadata into header form
res.setHeader('Upload-Metadata', Metadata.stringify(file.metadata) as string)
}
// End the response
return res.end()
}
}
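
For reference, the client side of this HEAD exchange might look like the following sketch (protocol behavior per the tus spec; the runtime fetch and URL handling are assumptions):

// Ask the server where to resume an upload.
async function getUploadOffset(uploadUrl: string): Promise<number> {
  const res = await fetch(uploadUrl, {
    method: 'HEAD',
    headers: { 'Tus-Resumable': '1.0.0' },
  })
  if (!res.ok) throw new Error(`upload not found: ${res.status}`)
  // Upload-Offset is set by HeadHandler above; Upload-Defer-Length may
  // be '1' when the total size is still unknown.
  return Number.parseInt(res.headers.get('Upload-Offset') ?? '0', 10)
}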

View File

@ -0,0 +1,61 @@
/**
 * OptionsHandler
 * Handles OPTIONS requests of the TUS protocol, advertising the protocol
 * version, supported extensions and CORS configuration to clients.
 */
import { ALLOWED_METHODS, HEADERS, MAX_AGE } from '../utils'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
/**
 * OptionsHandler class
 * Handles TUS OPTIONS requests; extends BaseHandler.
 * Usage:
 * const handler = new OptionsHandler(store, options)
 * handler.send(req, res)
 */
export class OptionsHandler extends BaseHandler {
/**
 * Handles an OPTIONS request and sends the response.
 * @param req - the HTTP request object
 * @param res - the HTTP response object
 * @returns Promise<void>
 *
 * Steps:
 * 1. resolve the configured maximum file size
 * 2. advertise the TUS protocol version and extensions
 * 3. set the CORS-related headers
 * 4. respond with 204 No Content
 * Errors propagate to BaseHandler's error handling.
 */
async send(req: http.IncomingMessage, res: http.ServerResponse) {
// Resolve the configured maximum file size
const maxSize = await this.getConfiguredMaxSize(req, null)
// Advertise the TUS protocol version (fixed at 1.0.0)
res.setHeader('Tus-Version', '1.0.0')
// Advertise the store's TUS extensions, if any
if (this.store.extensions.length > 0) {
res.setHeader('Tus-Extension', this.store.extensions.join(','))
}
// Advertise the maximum file size, if configured
if (maxSize) {
res.setHeader('Tus-Max-Size', maxSize)
}
// Merge the default and custom allowed headers
const allowedHeaders = [...HEADERS, ...(this.options.allowedHeaders ?? [])]
// Set the CORS-related headers
res.setHeader('Access-Control-Allow-Methods', ALLOWED_METHODS)
res.setHeader('Access-Control-Allow-Headers', allowedHeaders.join(', '))
res.setHeader('Access-Control-Max-Age', MAX_AGE)
// Respond with 204 No Content: success with no body
return this.write(res, 204)
}
}

View File

@ -0,0 +1,256 @@
/**
 * PATCH request handler module
 *
 * Implements the TUS PATCH request: appending data to an existing upload
 * at a given offset.
 *
 * Responsibilities:
 * - validating protocol headers
 * - writing the request body to the data store
 * - reporting progress and completion via events
 */
import debug from 'debug'
import { BaseHandler } from './BaseHandler'
import type http from 'node:http'
import { CancellationContext, ERRORS, Upload, EVENTS } from '../utils'
const log = debug('tus-node-server:handlers:patch')
/**
 * PATCH request handler class
 *
 * Extends BaseHandler and implements the TUS PATCH semantics.
 *
 * Design notes:
 * - the upload is locked while data is written
 * - async/await is used for all asynchronous work
 * - lifecycle events are emitted via EVENTS
 *
 * Usage:
 * const handler = new PatchHandler(store, options)
 * handler.send(req, res, context)
 */
export class PatchHandler extends BaseHandler {
/**
 * Core method handling a PATCH request.
 *
 * Steps:
 * 1. extract the upload ID and validate the headers
 * 2. acquire the upload lock
 * 3. verify the offset and, when present, declare the upload length
 * 4. write the request body to the store
 * 5. emit events and send the response
 *
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param context the cancellation context
 * @returns the HTTP response
 *
 * Errors thrown include:
 * - ERRORS.FILE_NOT_FOUND
 * - ERRORS.MISSING_OFFSET
 * - ERRORS.INVALID_CONTENT_TYPE
 * - ERRORS.FILE_NO_LONGER_EXISTS
 * - ERRORS.INVALID_OFFSET
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
try {
// Extract the upload ID from the request
const id = this.getFileIdFromRequest(req)
// console.log('id', id)
if (!id) {
throw ERRORS.FILE_NOT_FOUND
}
// The Upload-Offset header is required
if (req.headers['upload-offset'] === undefined) {
throw ERRORS.MISSING_OFFSET
}
// Parse the offset
const offset = Number.parseInt(req.headers['upload-offset'] as string, 10)
// The Content-Type header is required
const content_type = req.headers['content-type']
if (content_type === undefined) {
throw ERRORS.INVALID_CONTENT_TYPE
}
// Run the incoming-request hook, if configured
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
// Resolve the configured maximum file size
const maxFileSize = await this.getConfiguredMaxSize(req, id)
// Acquire the upload lock
const lock = await this.acquireLock(req, id, context)
let upload: Upload
let newOffset: number
try {
// Fetch the upload state from the store
upload = await this.store.getUpload(id)
// Check whether the upload has expired
const now = Date.now()
const creation = upload.creation_date
? new Date(upload.creation_date).getTime()
: now
const expiration = creation + this.store.getExpiration()
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
now > expiration
) {
throw ERRORS.FILE_NO_LONGER_EXISTS
}
// Verify that the request offset matches the stored offset
if (upload.offset !== offset) {
log(
`[PatchHandler] send: Incorrect offset - ${offset} sent but file is ${upload.offset}`
)
throw ERRORS.INVALID_OFFSET
}
// Handle the Upload-Length header, if present
const upload_length = req.headers['upload-length'] as string | undefined
if (upload_length !== undefined) {
const size = Number.parseInt(upload_length, 10)
// The store must support deferred length declaration
if (!this.store.hasExtension('creation-defer-length')) {
throw ERRORS.UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION
}
// The length may only be declared once
if (upload.size !== undefined) {
throw ERRORS.INVALID_LENGTH
}
// The declared length may not be smaller than the current offset
if (size < upload.offset) {
throw ERRORS.INVALID_LENGTH
}
// Enforce the configured maximum file size
if (maxFileSize > 0 && size > maxFileSize) {
throw ERRORS.ERR_MAX_SIZE_EXCEEDED
}
// Declare the upload length
await this.store.declareUploadLength(id, size)
upload.size = size
}
// Compute the maximum request body size
const maxBodySize = await this.calculateMaxBodySize(req, upload, maxFileSize)
// Write the data to the store
newOffset = await this.writeToStore(req, upload, maxBodySize, context)
} finally {
// Release the upload lock
await lock.unlock()
}
// Update the upload offset
upload.offset = newOffset
// Emit the (deprecated) data-received event
this.emit(EVENTS.POST_RECEIVE, req, res, upload)
// Build the response data
const responseData = {
status: 204,
headers: {
'Upload-Offset': newOffset,
} as Record<string, string | number>,
body: '',
}
// Handle upload completion
// Post-completion processing block
if (newOffset === upload.size && this.options.onUploadFinish) {
try {
// Invoke the upload-finished callback (async supported),
// letting users customize what happens once the upload completes.
const resOrObject = await this.options.onUploadFinish(req, res, upload)
// Compatibility handling: two return shapes are supported.
// 1. the http.ServerResponse object itself
// 2. an object carrying custom response information
if (
// Check for a standard ServerResponse object
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
// Use the returned server response directly
res = resOrObject as http.ServerResponse
} else {
// Type definition for the custom response object:
// exclude ServerResponse to keep things type-safe
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
// Narrow the return value to the custom response object
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
// Swap in the response object
res = obj.res
// Optionally override the response status code
if (obj.status_code) responseData.status = obj.status_code
// Optionally override the response body
if (obj.body) responseData.body = obj.body
// Merge in custom headers; the protocol's own headers take precedence
if (obj.headers)
responseData.headers = Object.assign(obj.headers, responseData.headers)
}
} catch (error: any) {
// Log errors raised by the upload-finished callback
// and rethrow to abort the request.
log(`onUploadFinish: ${error.body}`)
throw error
}
}
// Handle the expiration header
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
upload.creation_date &&
(upload.size === undefined || newOffset < upload.size)
) {
const creation = new Date(upload.creation_date)
const dateString = new Date(
creation.getTime() + this.store.getExpiration()
).toUTCString()
responseData.headers['Upload-Expires'] = dateString
}
// Send the response
const writtenRes = this.write(
res,
responseData.status,
responseData.headers,
responseData.body
)
// Emit the upload-finished event
if (newOffset === upload.size) {
this.emit(EVENTS.POST_FINISH, req, writtenRes, upload)
}
return writtenRes
} catch (e) {
// Abort the cancellation context before rethrowing
context.abort()
throw e
}
}
}
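
Both this handler and PostHandler below route their final response through the onUploadFinish hook. A minimal sketch of the hook's object return form handled above (field names follow the code; the header value is illustrative):

import type http from 'node:http'
import type { Upload } from '../utils'

const onUploadFinish = async (
  req: http.IncomingMessage,
  res: http.ServerResponse,
  upload: Upload,
) => {
  // e.g. persist the finished upload somewhere, then shape the response.
  return {
    res,
    status_code: 204,
    headers: { 'X-Upload-Id': upload.id },
  }
}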

View File

@ -0,0 +1,257 @@
import debug from 'debug'
import { BaseHandler } from './BaseHandler'
import { validateHeader } from '../validators/HeaderValidator'
import type http from 'node:http'
import type { ServerOptions, WithRequired } from '../types'
import { DataStore, Uid, CancellationContext, ERRORS, Metadata, Upload, EVENTS } from '../utils'
const log = debug('tus-node-server:handlers:post')
/**
 * PostHandler handles HTTP POST requests, creating new uploads in the DataStore.
 * Extends BaseHandler.
 */
export class PostHandler extends BaseHandler {
// Narrow the options type from BaseHandler: namingFunction is guaranteed to be set in the constructor
declare options: WithRequired<ServerOptions, 'namingFunction'>
/**
 * Creates a PostHandler.
 * @param store - the DataStore used to persist uploads
 * @param options - server options; a default namingFunction is installed when none is given
 * @throws when namingFunction is provided but is not a function
 */
constructor(store: DataStore, options: ServerOptions) {
if (options.namingFunction && typeof options.namingFunction !== 'function') {
throw new Error("'namingFunction' must be a function")
}
if (!options.namingFunction) {
options.namingFunction = Uid.rand
}
super(store, options)
}
/**
 * Creates a new upload in the DataStore.
 * @param req - the HTTP request
 * @param res - the HTTP response
 * @param context - the cancellation context
 * @returns the HTTP response
 * @throws when 'upload-concat' is set but the DataStore lacks the concatenation extension
 * @throws when neither or both of 'upload-length' and 'upload-defer-length' are set
 * @throws when 'upload-metadata' cannot be parsed
 * @throws when the naming function fails
 */
async send(
req: http.IncomingMessage,
res: http.ServerResponse,
context: CancellationContext
) {
if ('upload-concat' in req.headers && !this.store.hasExtension('concatentation')) {
throw ERRORS.UNSUPPORTED_CONCATENATION_EXTENSION
}
const upload_length = req.headers['upload-length'] as string | undefined
const upload_defer_length = req.headers['upload-defer-length'] as string | undefined
const upload_metadata = req.headers['upload-metadata'] as string | undefined
if (
upload_defer_length !== undefined && // throw when the extension is unsupported
!this.store.hasExtension('creation-defer-length')
) {
throw ERRORS.UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION
}
if ((upload_length === undefined) === (upload_defer_length === undefined)) {
throw ERRORS.INVALID_LENGTH
}
let metadata: ReturnType<(typeof Metadata)['parse']> | undefined
if ('upload-metadata' in req.headers) {
try {
metadata = Metadata.parse(upload_metadata)
} catch {
throw ERRORS.INVALID_METADATA
}
}
let id: string
try {
id = await this.options.namingFunction(req, metadata)
} catch (error) {
log('create: check your `namingFunction`. Error', error)
throw error
}
const maxFileSize = await this.getConfiguredMaxSize(req, id)
if (
upload_length &&
maxFileSize > 0 &&
Number.parseInt(upload_length, 10) > maxFileSize
) {
throw ERRORS.ERR_MAX_SIZE_EXCEEDED
}
if (this.options.onIncomingRequest) {
await this.options.onIncomingRequest(req, res, id)
}
const upload = new Upload({
id,
size: upload_length ? Number.parseInt(upload_length, 10) : undefined,
offset: 0,
metadata,
})
if (this.options.onUploadCreate) {
try {
const resOrObject = await this.options.onUploadCreate(req, res, upload)
// Backwards compatibility; will be removed in the next major version
// instanceof cannot be used here because instances are mocked in tests
if (
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
res = resOrObject as http.ServerResponse
} else {
// The ugly type definition is needed because TS only narrows via instanceof
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
res = obj.res
if (obj.metadata) {
upload.metadata = obj.metadata
}
}
} catch (error: any) {
log(`onUploadCreate error: ${error.body}`)
throw error
}
}
const lock = await this.acquireLock(req, id, context)
let isFinal: boolean
let url: string
// Recommended response defaults
const responseData = {
status: 201,
headers: {} as Record<string, string | number>,
body: '',
}
try {
await this.store.create(upload)
url = this.generateUrl(req, upload.id)
this.emit(EVENTS.POST_CREATE, req, res, upload, url)
isFinal = upload.size === 0 && !upload.sizeIsDeferred
// If the request includes a Content-Type header, the creation-with-upload extension is being used
if (validateHeader('content-type', req.headers['content-type'])) {
const bodyMaxSize = await this.calculateMaxBodySize(req, upload, maxFileSize)
const newOffset = await this.writeToStore(req, upload, bodyMaxSize, context)
responseData.headers['Upload-Offset'] = newOffset.toString()
isFinal = newOffset === Number.parseInt(upload_length as string, 10)
upload.offset = newOffset
}
} catch (e) {
context.abort()
throw e
} finally {
await lock.unlock()
}
// Post-upload-completion handling
if (isFinal && this.options.onUploadFinish) {
try {
// Invoke the custom upload-finished callback with the request, response
// and upload, letting users customize post-completion behavior.
const resOrObject = await this.options.onUploadFinish(req, res, upload)
// Compatibility handling: check whether the return value is an HTTP
// response object by probing for write and writeHead methods.
if (
typeof (resOrObject as http.ServerResponse).write === 'function' &&
typeof (resOrObject as http.ServerResponse).writeHead === 'function'
) {
// An HTTP response object was returned; use it directly
res = resOrObject as http.ServerResponse
} else {
// Handle the custom return object case.
// The involved type definition excludes the ServerResponse type.
type ExcludeServerResponse<T> = T extends http.ServerResponse ? never : T
// Narrow the return value to the non-ServerResponse shape
const obj = resOrObject as ExcludeServerResponse<typeof resOrObject>
// Swap in the response object
res = obj.res
// Override the response status code when provided
if (obj.status_code) responseData.status = obj.status_code
// Override the response body when provided
if (obj.body) responseData.body = obj.body
// Merge in custom headers; the protocol's own headers take precedence
if (obj.headers)
responseData.headers = Object.assign(obj.headers, responseData.headers)
}
} catch (error: any) {
// Log errors raised by the upload-finished callback
log(`onUploadFinish: ${error.body}`)
// Rethrow to abort the upload flow
throw error
}
}
// The Upload-Expires response header indicates when an unfinished upload expires.
// If the expiration is known at creation time, the Upload-Expires header must be included in the response
if (
this.store.hasExtension('expiration') &&
this.store.getExpiration() > 0 &&
upload.creation_date
) {
const created = await this.store.getUpload(upload.id)
if (created.offset !== Number.parseInt(upload_length as string, 10)) {
const creation = new Date(upload.creation_date)
// The value must be in RFC 7231 datetime format
responseData.headers['Upload-Expires'] = new Date(
creation.getTime() + this.store.getExpiration()
).toUTCString()
}
}
// Only attach the Location header when the final HTTP status is 201 or 3xx
if (
responseData.status === 201 ||
(responseData.status >= 300 && responseData.status < 400)
) {
responseData.headers.Location = url
}
const writtenRes = this.write(
res,
responseData.status,
responseData.headers,
responseData.body
)
if (isFinal) {
this.emit(EVENTS.POST_FINISH, req, writtenRes, upload)
}
return writtenRes
}
}

View File

@ -0,0 +1,5 @@
export { Server } from './server'
export * from './types'
export * from './lockers'
export * from './utils'
export * from "./store"

View File

@ -0,0 +1,145 @@
/**
 * MemoryLocker is an in-memory Locker implementation that serializes access
 * to uploads within a single process.
 *
 * How it works:
 * - a Map tracks which upload IDs are currently locked
 * - when a second caller requests a lock that is already held, the current
 *   holder's `requestRelease` callback (passed to `lock`) is invoked,
 *   asking it to release the lock
 * - acquisition is retried until it succeeds or a timeout elapses
 *
 * Caveats:
 * - `lock` throws ERRORS.ERR_LOCK_TIMEOUT when the timeout elapses
 * - `unlock` throws when releasing a lock that is not currently held
 */
import { RequestRelease, Locker, ERRORS, Lock } from "../utils"
export interface MemoryLockerOptions {
acquireLockTimeout: number
}
interface LockEntry {
requestRelease: RequestRelease
}
export class MemoryLocker implements Locker {
timeout: number
locks = new Map<string, LockEntry>()
constructor(options?: MemoryLockerOptions) {
this.timeout = options?.acquireLockTimeout ?? 1000 * 30
}
/**
 * Creates a MemoryLock for the given ID.
 * @param id the upload ID
 * @returns a MemoryLock instance
 */
newLock(id: string) {
return new MemoryLock(id, this, this.timeout)
}
}
class MemoryLock implements Lock {
constructor(
private id: string,
private locker: MemoryLocker,
private timeout: number = 1000 * 30
) { }
/**
 * Acquires the lock, racing acquisition against a timeout.
 * @param requestRelease callback invoked when another caller requests this lock
 * @throws ERRORS.ERR_LOCK_TIMEOUT when the lock cannot be acquired in time
 */
async lock(requestRelease: RequestRelease): Promise<void> {
const abortController = new AbortController()
const lock = await Promise.race([
this.waitTimeout(abortController.signal),
this.acquireLock(this.id, requestRelease, abortController.signal),
])
abortController.abort()
if (!lock) {
throw ERRORS.ERR_LOCK_TIMEOUT
}
}
/**
 * Attempts to acquire the lock for an ID, retrying until aborted.
 * @param id the upload ID
 * @param requestRelease callback invoked when another caller requests the lock
 * @param signal AbortSignal that stops further acquisition attempts
 * @returns true when the lock was acquired, false when aborted
 */
protected async acquireLock(
id: string,
requestRelease: RequestRelease,
signal: AbortSignal
): Promise<boolean> {
if (signal.aborted) {
return false
}
const lock = this.locker.locks.get(id)
if (!lock) {
const lock = {
requestRelease,
}
this.locker.locks.set(id, lock)
return true
}
await lock.requestRelease?.()
return await new Promise((resolve, reject) => {
// setImmediate is used here because it:
// 1. prevents stack overflow by deferring the recursive call to the next event-loop iteration
// 2. lets the event loop service other pending events, keeping the server responsive
// 3. keeps lock acquisition fair by giving other requests a chance to acquire the lock
setImmediate(() => {
this.acquireLock(id, requestRelease, signal).then(resolve).catch(reject)
})
})
}
/**
 * Releases the lock.
 * @throws when the lock is not currently held
 */
async unlock(): Promise<void> {
const lock = this.locker.locks.get(this.id)
if (!lock) {
throw new Error('Releasing an unlocked lock!')
}
this.locker.locks.delete(this.id)
}
/**
 * Waits for the timeout (or an abort) and resolves to false.
 * @param signal AbortSignal that cancels the wait early
 * @returns false once the timeout elapses or the wait is aborted
 */
protected waitTimeout(signal: AbortSignal) {
return new Promise<boolean>((resolve) => {
const timeout = setTimeout(() => {
resolve(false)
}, this.timeout)
const abortListener = () => {
clearTimeout(timeout)
signal.removeEventListener('abort', abortListener)
resolve(false)
}
signal.addEventListener('abort', abortListener)
})
}
}
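
A usage sketch for the locker (the work callback and timeout are illustrative):

const locker = new MemoryLocker({ acquireLockTimeout: 5_000 })

async function withUploadLock(id: string, work: () => Promise<void>) {
  const lock = locker.newLock(id)
  // The callback is the requestRelease hook: it fires when another caller
  // wants this lock, giving us a chance to wind down in-flight work.
  await lock.lock(async () => {
    console.log(`lock for ${id} requested by another caller`)
  })
  try {
    await work()
  } finally {
    await lock.unlock()
  }
}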

View File

@ -0,0 +1 @@
export * from './MemoryLocker'

519
packages/tus/src/server.ts Normal file
View File

@ -0,0 +1,519 @@
import http from "node:http";
import { EventEmitter } from "node:events";
import debug from "debug";
import { GetHandler } from "./handlers/GetHandler";
import { HeadHandler } from "./handlers/HeadHandler";
import { OptionsHandler } from "./handlers/OptionsHandler";
import { PatchHandler } from "./handlers/PatchHandler";
import { PostHandler } from "./handlers/PostHandler";
import { DeleteHandler } from "./handlers/DeleteHandler";
import { validateHeader } from "./validators/HeaderValidator";
import type stream from "node:stream";
import type { ServerOptions, RouteHandler, WithOptional } from "./types";
import { MemoryLocker } from "./lockers";
import {
EVENTS,
Upload,
DataStore,
REQUEST_METHODS,
ERRORS,
TUS_RESUMABLE,
EXPOSED_HEADERS,
CancellationContext,
} from "./utils";
/**
 * Handler instance types for each HTTP method the TUS server supports.
 */
type Handlers = {
GET: InstanceType<typeof GetHandler>; // GET request handler
HEAD: InstanceType<typeof HeadHandler>; // HEAD request handler
OPTIONS: InstanceType<typeof OptionsHandler>; // OPTIONS request handler
PATCH: InstanceType<typeof PatchHandler>; // PATCH request handler
POST: InstanceType<typeof PostHandler>; // POST request handler
DELETE: InstanceType<typeof DeleteHandler>; // DELETE request handler
};
/**
 * TUS server event map.
 * Describes the upload lifecycle events the server emits.
 */
interface TusEvents {
/**
 * Emitted after an upload has been created.
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param upload the created upload
 * @param url the URL of the created upload
 */
[EVENTS.POST_CREATE]: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload,
url: string
) => void;
/**
 * @deprecated (still emitted for backwards compatibility)
 * Use POST_RECEIVE_V2 instead.
 */
[EVENTS.POST_RECEIVE]: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => void;
/**
 * V2 progress event, emitted while upload data is being received.
 * @param req the HTTP request object
 * @param upload the upload in progress
 */
[EVENTS.POST_RECEIVE_V2]: (
req: http.IncomingMessage,
upload: Upload
) => void;
/**
 * Emitted once an upload has finished.
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param upload the finished upload
 */
[EVENTS.POST_FINISH]: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => void;
/**
 * Emitted after an upload has been terminated.
 * @param req the HTTP request object
 * @param res the HTTP response object
 * @param id the ID of the terminated upload
 */
[EVENTS.POST_TERMINATE]: (
req: http.IncomingMessage,
res: http.ServerResponse,
id: string
) => void;
}
/**
 * Type aliases for EventEmitter's handler signatures.
 */
type on = EventEmitter["on"];
type emit = EventEmitter["emit"];
/**
 * TUS server interface declaration.
 * Extends EventEmitter with typed overloads for the upload lifecycle events.
 */
export declare interface Server {
/**
 * Subscribes to a typed TUS event.
 * @param event one of the TusEvents keys
 * @param listener the event listener
 * @returns the Server instance, for chaining
 */
on<Event extends keyof TusEvents>(
event: Event,
listener: TusEvents[Event]
): this;
/**
 * Subscribes to an arbitrary event.
 * @param eventName the event name
 * @param listener the event listener
 * @returns the Server instance, for chaining
 */
on(eventName: Parameters<on>[0], listener: Parameters<on>[1]): this;
/**
 * Emits a typed TUS event.
 * @param event one of the TusEvents keys
 * @param listener the event payload
 * @returns the return value of emit
 */
emit<Event extends keyof TusEvents>(
event: Event,
listener: TusEvents[Event]
): ReturnType<emit>;
/**
 * Emits an arbitrary event.
 * @param eventName the event name
 * @param listener the event payload
 * @returns the return value of emit
 */
emit(
eventName: Parameters<emit>[0],
listener: Parameters<emit>[1]
): ReturnType<emit>;
}
/**
 * Debug logger for the tus server.
 */
const log = debug("tus-node-server");
// biome-ignore lint/suspicious/noUnsafeDeclarationMerging: it's fine
export class Server extends EventEmitter {
datastore: DataStore;
handlers: Handlers;
options: ServerOptions;
/**
 * Creates a Server.
 * @param options - server options, including the datastore
 * @throws when options, options.path or options.datastore is missing
 */
constructor(
options: WithOptional<ServerOptions, "locker"> & {
datastore: DataStore;
}
) {
super();
if (!options) {
throw new Error("'options' must be defined");
}
if (!options.path) {
throw new Error("'path' is not defined; must have a path");
}
if (!options.datastore) {
throw new Error(
"'datastore' is not defined; must have a datastore"
);
}
if (!options.locker) {
options.locker = new MemoryLocker();
}
if (!options.lockDrainTimeout) {
options.lockDrainTimeout = 3000;
}
if (!options.postReceiveInterval) {
options.postReceiveInterval = 1000;
}
const { datastore, ...rest } = options;
this.options = rest as ServerOptions;
this.datastore = datastore;
this.handlers = {
// GET request handlers should be provided by the concrete implementation
GET: new GetHandler(this.datastore, this.options),
// These methods are handled per the tus protocol
HEAD: new HeadHandler(this.datastore, this.options),
OPTIONS: new OptionsHandler(this.datastore, this.options),
PATCH: new PatchHandler(this.datastore, this.options),
POST: new PostHandler(this.datastore, this.options),
DELETE: new DeleteHandler(this.datastore, this.options),
};
// Any handler assigned to this object keyed by HTTP method will be used to respond to those requests.
// They are set/reset whenever a datastore is assigned to the server.
// When removing an event listener from the server, the listener must first be removed from each handler.
// This must happen before the 'newListener' listener is added, to avoid attaching a
// 'removeListener' event listener to every request handler.
this.on("removeListener", (event: string, listener) => {
this.datastore.removeListener(event, listener);
for (const method of REQUEST_METHODS) {
this.handlers[method].removeListener(event, listener);
}
});
// When event listeners are added to the server, make sure they bubble up from the request handlers to the server level.
this.on("newListener", (event: string, listener) => {
this.datastore.on(event, listener);
for (const method of REQUEST_METHODS) {
this.handlers[method].on(event, listener);
}
});
}
/**
 * Registers a custom GET route.
 * @param path - the request path
 * @param handler - the route handler
 */
get(path: string, handler: RouteHandler) {
this.handlers.GET.registerPath(path, handler);
}
/**
 * The main 'request' listener: dispatches requests to the per-method handlers.
 * @param req - the HTTP request
 * @param res - the HTTP response
 * @returns the HTTP response
 */
async handle(
req: http.IncomingMessage,
res: http.ServerResponse
// biome-ignore lint/suspicious/noConfusingVoidType: it's fine
): Promise<http.ServerResponse | stream.Writable | void> {
const context = this.createContext(req);
log(`[TusServer] handle: ${req.method} ${req.url}`);
// Allow overriding the HTTP method. Some libraries/environments do not support PATCH and DELETE requests, e.g. Flash in the browser and parts of the Java ecosystem
if (req.headers["x-http-method-override"]) {
req.method = (
req.headers["x-http-method-override"] as string
).toUpperCase();
}
const onError = async (error: {
status_code?: number;
body?: string;
message: string;
}) => {
let status_code =
error.status_code || ERRORS.UNKNOWN_ERROR.status_code;
let body =
error.body ||
`${ERRORS.UNKNOWN_ERROR.body}${error.message || ""}\n`;
if (this.options.onResponseError) {
const errorMapping = await this.options.onResponseError(
req,
res,
error as Error
);
if (errorMapping) {
status_code = errorMapping.status_code;
body = errorMapping.body;
}
}
return this.write(context, req, res, status_code, body);
};
if (req.method === "GET") {
const handler = this.handlers.GET;
return handler.send(req, res).catch(onError);
}
// The Tus-Resumable header must be included in every request and response except OPTIONS. Its value must be the protocol version used by the client or server.
res.setHeader("Tus-Resumable", TUS_RESUMABLE);
if (
req.method !== "OPTIONS" &&
req.headers["tus-resumable"] === undefined
) {
return this.write(
context,
req,
res,
412,
"Tus-Resumable Required\n"
);
}
// Validate all required headers against the tus protocol
const invalid_headers = [];
for (const header_name in req.headers) {
if (req.method === "OPTIONS") {
continue;
}
// The content type is only checked for PATCH requests. For all other request
// methods it is ignored and treated as if no content type was set, because
// some HTTP clients may enforce a default value for this header.
// See https://github.com/tus/tus-node-server/pull/116
if (
header_name.toLowerCase() === "content-type" &&
req.method !== "PATCH"
) {
continue;
}
if (
!validateHeader(
header_name,
req.headers[header_name] as string | undefined
)
) {
log(
`Invalid ${header_name} header: ${req.headers[header_name]}`
);
invalid_headers.push(header_name);
}
}
if (invalid_headers.length > 0) {
return this.write(
context,
req,
res,
400,
`Invalid ${invalid_headers.join(" ")}\n`
);
}
// Enable CORS
res.setHeader("Access-Control-Allow-Origin", this.getCorsOrigin(req));
res.setHeader("Access-Control-Expose-Headers", EXPOSED_HEADERS);
if (this.options.allowedCredentials === true) {
res.setHeader("Access-Control-Allow-Credentials", "true");
}
// Dispatch to the handler for the request method
const handler = this.handlers[req.method as keyof Handlers];
if (handler) {
return handler.send(req, res, context).catch(onError);
}
return this.write(context, req, res, 404, "Not found\n");
}
/**
 * Resolves the CORS origin for the response.
 *
 * Returns the request's `origin` when it is on the allowed-origins list,
 * falls back to the first configured allowed origin when the request's
 * `origin` is absent or not allowed, and returns the wildcard `*` when
 * no allowed origins are configured.
 *
 * @param req the HTTP request object
 * @returns the CORS origin: the request `origin`, a configured origin, or `*`
 *
 * Notes:
 * - the allowedOrigins option keeps the CORS policy flexible
 * - `*` is the permissive default when no CORS policy is configured
 */
private getCorsOrigin(req: http.IncomingMessage): string {
const origin = req.headers.origin;
// Check whether the request's `origin` header is on the allowed-origins list
const isOriginAllowed =
this.options.allowedOrigins?.some(
(allowedOrigin) => allowedOrigin === origin
) ?? true;
// If `origin` is present and allowed, echo it back
if (origin && isOriginAllowed) {
return origin;
}
// If the allowed-origins list is non-empty, fall back to its first entry
if (
this.options.allowedOrigins &&
this.options.allowedOrigins.length > 0
) {
return this.options.allowedOrigins[0];
}
// With no allowed origins configured, return the wildcard `*` (allow all origins)
return "*";
}
/**
 * Writes a response, honoring the cancellation context.
 * @param context - the cancellation context
 * @param req - the HTTP request
 * @param res - the HTTP response
 * @param status - the HTTP status code
 * @param body - the response body
 * @param headers - the response headers
 * @returns the finished HTTP response
 */
write(
context: CancellationContext,
req: http.IncomingMessage,
res: http.ServerResponse,
status: number,
body = "",
headers = {}
) {
const isAborted = context.signal.aborted;
if (status !== 204) {
// @ts-expect-error not explicitly typed but possible
headers["Content-Length"] = Buffer.byteLength(body, "utf8");
}
if (isAborted) {
// This branch handles requests that have been marked as aborted.
// The server tells the client that the connection will be closed,
// communicated by setting the 'Connection' header to 'close' on the response.
// This prevents the server from continuing to process a request that is
// no longer needed, saving resources.
// @ts-expect-error not explicitly typed but possible
headers.Connection = "close";
// Attach a 'finish' event listener to the response ('res').
// 'finish' fires once the response has been sent to the client.
// At that point the request ('req') object is destroyed; destroying it
// releases any resources still tied to this already-aborted request.
res.on("finish", () => {
req.destroy();
});
}
res.writeHead(status, headers);
res.write(body);
return res.end();
}
/**
 * Creates and starts an HTTP server with this instance as the request listener.
 * @param args - arguments forwarded to http.Server#listen
 * @returns the HTTP server
 */
// biome-ignore lint/suspicious/noExplicitAny: todo
listen(...args: any[]): http.Server {
return http.createServer(this.handle.bind(this)).listen(...args);
}
/**
 * Deletes expired uploads from the datastore.
 * @returns the number of uploads deleted
 * @throws when the datastore does not support the expiration extension
 */
cleanUpExpiredUploads(): Promise<number> {
if (!this.datastore.hasExtension("expiration")) {
throw ERRORS.UNSUPPORTED_EXPIRATION_EXTENSION;
}
return this.datastore.deleteExpired();
}
/**
 * Creates the cancellation context for a request.
 * @param req - the HTTP request
 * @returns an object exposing the abort signal plus abort and cancel methods
 */
protected createContext(req: http.IncomingMessage) {
// Initialize two AbortControllers:
// 1. `requestAbortController` for immediate request termination, notably to stop
//    a client upload when an error occurs.
// 2. `abortWithDelayController` to introduce a delay before termination, giving the
//    server time to finish in-flight work. This matters when a future request may
//    need the lock currently held by this request.
const requestAbortController = new AbortController();
const abortWithDelayController = new AbortController();
// Invoked when `abortWithDelayController` fires; aborts the request after the configured delay.
const onDelayedAbort = (err: unknown) => {
abortWithDelayController.signal.removeEventListener(
"abort",
onDelayedAbort
);
setTimeout(() => {
requestAbortController.abort(err);
}, this.options.lockDrainTimeout);
};
abortWithDelayController.signal.addEventListener(
"abort",
onDelayedAbort
);
// Remove the listener when the request closes, to avoid memory leaks.
req.on("close", () => {
abortWithDelayController.signal.removeEventListener(
"abort",
onDelayedAbort
);
});
// Return an object exposing the signal and the two ways to abort the request.
// `signal` is used to observe request aborts.
// `abort` aborts the request immediately.
// `cancel` starts the delayed abort sequence.
return {
signal: requestAbortController.signal,
abort: () => {
// Abort the request immediately
if (!requestAbortController.signal.aborted) {
requestAbortController.abort(ERRORS.ABORTED);
}
},
cancel: () => {
// Start the delayed abort sequence, unless it is already underway
if (!abortWithDelayController.signal.aborted) {
abortWithDelayController.abort(ERRORS.ABORTED);
}
}
},
};
}
}
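
Putting the pieces together, a minimal sketch of standing up the server with this package's exports (the port and directory are illustrative; index.ts above re-exports Server, the stores and the utils):

import { Server, FileStore, EVENTS } from '@repo/tus'

const tusServer = new Server({
  path: '/files',
  datastore: new FileStore({ directory: './uploads' }),
})

tusServer.on(EVENTS.POST_FINISH, (req, res, upload) => {
  console.log(`upload complete: ${upload.id} (${upload.offset} bytes)`)
})

tusServer.listen(1080)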

View File

@ -0,0 +1,230 @@
// TODO: use /promises versions
import fs from 'node:fs'
import fsProm from 'node:fs/promises'
import path from 'node:path'
import stream from 'node:stream'
import type http from 'node:http'
import debug from 'debug'
import { DataStore, Upload, ERRORS } from '../../utils'
import {
FileKvStore as FileConfigstore,
MemoryKvStore as MemoryConfigstore,
RedisKvStore as RedisConfigstore,
KvStore as Configstore,
} from '../../utils'
type Options = {
directory: string
configstore?: Configstore
expirationPeriodInMilliseconds?: number
}
const MASK = '0777'
const IGNORED_MKDIR_ERROR = 'EEXIST'
const FILE_DOESNT_EXIST = 'ENOENT'
const log = debug('tus-node-server:stores:filestore')
export class FileStore extends DataStore {
directory: string
configstore: Configstore
expirationPeriodInMilliseconds: number
constructor({ directory, configstore, expirationPeriodInMilliseconds }: Options) {
super()
this.directory = directory
this.configstore = configstore ?? new FileConfigstore(directory)
this.expirationPeriodInMilliseconds = expirationPeriodInMilliseconds ?? 0
this.extensions = [
'creation',
'creation-with-upload',
'creation-defer-length',
'termination',
'expiration',
]
// TODO: this async call can not happen in the constructor
this.checkOrCreateDirectory()
}
/**
* Ensure the directory exists.
*/
private checkOrCreateDirectory() {
fs.mkdir(this.directory, { mode: MASK, recursive: true }, (error) => {
if (error && error.code !== IGNORED_MKDIR_ERROR) {
throw error
}
})
}
/**
* Create an empty file.
*/
async create(file: Upload): Promise<Upload> {
const dirs = file.id.split('/').slice(0, -1)
const filePath = path.join(this.directory, file.id)
await fsProm.mkdir(path.join(this.directory, ...dirs), { recursive: true })
await fsProm.writeFile(filePath, '')
await this.configstore.set(file.id, file)
file.storage = { type: 'file', path: filePath }
return file
}
read(file_id: string) {
return fs.createReadStream(path.join(this.directory, file_id))
}
remove(file_id: string): Promise<void> {
return new Promise((resolve, reject) => {
fs.unlink(`${this.directory}/${file_id}`, (err) => {
if (err) {
log('[FileStore] delete: Error', err)
reject(ERRORS.FILE_NOT_FOUND)
return
}
try {
resolve(this.configstore.delete(file_id))
} catch (error) {
reject(error)
}
})
})
}
write(
readable: http.IncomingMessage | stream.Readable,
file_id: string,
offset: number
): Promise<number> {
const file_path = path.join(this.directory, file_id)
const writeable = fs.createWriteStream(file_path, {
flags: 'r+',
start: offset,
})
let bytes_received = 0
const transform = new stream.Transform({
transform(chunk, _, callback) {
bytes_received += chunk.length
callback(null, chunk)
},
})
return new Promise((resolve, reject) => {
stream.pipeline(readable, transform, writeable, (err) => {
if (err) {
log('[FileStore] write: Error', err)
return reject(ERRORS.FILE_WRITE_ERROR)
}
log(`[FileStore] write: ${bytes_received} bytes written to ${file_path}`)
offset += bytes_received
log(`[FileStore] write: File is now ${offset} bytes`)
return resolve(offset)
})
})
}
async getUpload(id: string): Promise<Upload> {
const file = await this.configstore.get(id)
if (!file) {
throw ERRORS.FILE_NOT_FOUND
}
return new Promise((resolve, reject) => {
const file_path = `${this.directory}/${id}`
fs.stat(file_path, (error, stats) => {
if (error && error.code === FILE_DOESNT_EXIST && file) {
log(
`[FileStore] getUpload: No file found at ${file_path} but db record exists`,
file
)
return reject(ERRORS.FILE_NO_LONGER_EXISTS)
}
if (error && error.code === FILE_DOESNT_EXIST) {
log(`[FileStore] getUpload: No file found at ${file_path}`)
return reject(ERRORS.FILE_NOT_FOUND)
}
if (error) {
return reject(error)
}
if (stats.isDirectory()) {
log(`[FileStore] getUpload: ${file_path} is a directory`)
return reject(ERRORS.FILE_NOT_FOUND)
}
return resolve(
new Upload({
id,
size: file.size,
offset: stats.size,
metadata: file.metadata,
creation_date: file.creation_date,
storage: { type: 'file', path: file_path },
})
)
})
})
}
async declareUploadLength(id: string, upload_length: number) {
const file = await this.configstore.get(id)
if (!file) {
throw ERRORS.FILE_NOT_FOUND
}
file.size = upload_length
await this.configstore.set(id, file)
}
async deleteExpired(): Promise<number> {
const now = new Date()
const toDelete: Promise<void>[] = []
if (!this.configstore.list) {
throw ERRORS.UNSUPPORTED_EXPIRATION_EXTENSION
}
const uploadKeys = await this.configstore.list()
for (const file_id of uploadKeys) {
try {
const info = await this.configstore.get(file_id)
if (
info &&
'creation_date' in info &&
this.getExpiration() > 0 &&
info.size !== info.offset &&
info.creation_date
) {
const creation = new Date(info.creation_date)
const expires = new Date(creation.getTime() + this.getExpiration())
if (now > expires) {
toDelete.push(this.remove(file_id))
}
}
} catch (error) {
if (error !== ERRORS.FILE_NO_LONGER_EXISTS) {
throw error
}
}
}
await Promise.all(toDelete)
return toDelete.length
}
getExpiration(): number {
return this.expirationPeriodInMilliseconds
}
}
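
A sketch of wiring the expiration extension to a periodic cleanup (interval and period are illustrative):

const store = new FileStore({
  directory: './uploads',
  expirationPeriodInMilliseconds: 24 * 60 * 60 * 1000, // expire after 24h
})

// deleteExpired() scans the configstore and removes stale partial uploads.
setInterval(async () => {
  try {
    const removed = await store.deleteExpired()
    if (removed > 0) console.log(`removed ${removed} expired uploads`)
  } catch (err) {
    console.error('upload cleanup failed', err)
  }
}, 60 * 60 * 1000) // hourly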

View File

@ -0,0 +1,2 @@
export * from "./file-store"
export * from "./s3-store"

View File

@ -0,0 +1,803 @@
import os from 'node:os'
import fs, { promises as fsProm } from 'node:fs'
import stream, { promises as streamProm } from 'node:stream'
import type { Readable } from 'node:stream'
import type AWS from '@aws-sdk/client-s3'
import { NoSuchKey, NotFound, S3, type S3ClientConfig } from '@aws-sdk/client-s3'
import debug from 'debug'
import {
DataStore,
StreamSplitter,
Upload,
ERRORS,
TUS_RESUMABLE,
type KvStore,
MemoryKvStore,
} from '../../utils'
import { Semaphore, type Permit } from '@shopify/semaphore'
import MultiStream from 'multistream'
import crypto from 'node:crypto'
import path from 'node:path'
const log = debug('tus-node-server:stores:s3store')
type Options = {
// The preferred part size for parts send to S3. Can not be lower than 5MiB or more than 5GiB.
// The server calculates the optimal part size, which takes this size into account,
// but may increase it to not exceed the S3 10K parts limit.
partSize?: number
useTags?: boolean
maxConcurrentPartUploads?: number
cache?: KvStore<MetadataValue>
expirationPeriodInMilliseconds?: number
// Options to pass to the AWS S3 SDK.
s3ClientConfig: S3ClientConfig & { bucket: string }
}
export type MetadataValue = {
file: Upload
'upload-id': string
'tus-version': string
}
function calcOffsetFromParts(parts?: Array<AWS.Part>) {
// @ts-expect-error not undefined
return parts && parts.length > 0 ? parts.reduce((a, b) => a + b.Size, 0) : 0
}
// Implementation (based on https://github.com/tus/tusd/blob/master/s3store/s3store.go)
//
// Once a new tus upload is initiated, multiple objects in S3 are created:
//
// First of all, a new info object is stored which contains (as Metadata) a JSON-encoded
// blob of general information about the upload including its size and meta data.
// This kind of objects have the suffix ".info" in their key.
//
// In addition a new multipart upload
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) is
// created. Whenever a new chunk is uploaded to tus-node-server using a PATCH request, a
// new part is pushed to the multipart upload on S3.
//
// If meta data is associated with the upload during creation, it will be added
// to the multipart upload and after finishing it, the meta data will be passed
// to the final object. However, the metadata which will be attached to the
// final object can only contain ASCII characters and every non-ASCII character
// will be replaced by a question mark (for example, "Menü" will be "Men?").
// However, this does not apply for the metadata returned by the `_getMetadata`
// function since it relies on the info object for reading the metadata.
// Therefore, HEAD responses will always contain the unchanged metadata, Base64-
// encoded, even if it contains non-ASCII characters.
//
// Once the upload is finished, the multipart upload is completed, resulting in
// the entire file being stored in the bucket. The info object, containing
// meta data is not deleted.
//
// Considerations
//
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
// are internally used.
// For each incoming PATCH request (a call to `write`), a new part is uploaded
// to S3.
export class S3Store extends DataStore {
private bucket: string
private cache: KvStore<MetadataValue>
private client: S3
private preferredPartSize: number
private expirationPeriodInMilliseconds = 0
private useTags = true
private partUploadSemaphore: Semaphore
public maxMultipartParts = 10_000 as const
public minPartSize = 5_242_880 as const // 5MiB
public maxUploadSize = 5_497_558_138_880 as const // 5TiB
constructor(options: Options) {
super()
const { partSize, s3ClientConfig } = options
const { bucket, ...restS3ClientConfig } = s3ClientConfig
this.extensions = [
'creation',
'creation-with-upload',
'creation-defer-length',
'termination',
'expiration',
]
this.bucket = bucket
this.preferredPartSize = partSize || 8 * 1024 * 1024
this.expirationPeriodInMilliseconds = options.expirationPeriodInMilliseconds ?? 0
this.useTags = options.useTags ?? true
this.cache = options.cache ?? new MemoryKvStore<MetadataValue>()
this.client = new S3(restS3ClientConfig)
this.partUploadSemaphore = new Semaphore(options.maxConcurrentPartUploads ?? 60)
}
protected shouldUseExpirationTags() {
return this.expirationPeriodInMilliseconds !== 0 && this.useTags
}
protected useCompleteTag(value: 'true' | 'false') {
if (!this.shouldUseExpirationTags()) {
return undefined
}
return `Tus-Completed=${value}`
}
/**
* Saves upload metadata to a `${file_id}.info` file on S3.
* Please note that the file is empty and the metadata is saved
* on the S3 object's `Metadata` field, so that only a `headObject`
* is necessary to retrieve the data.
*/
private async saveMetadata(upload: Upload, uploadId: string) {
log(`[${upload.id}] saving metadata`)
await this.client.putObject({
Bucket: this.bucket,
Key: this.infoKey(upload.id),
Body: JSON.stringify(upload),
Tagging: this.useCompleteTag('false'),
Metadata: {
'upload-id': uploadId,
'tus-version': TUS_RESUMABLE,
},
})
log(`[${upload.id}] metadata file saved`)
}
private async completeMetadata(upload: Upload) {
if (!this.shouldUseExpirationTags()) {
return
}
const { 'upload-id': uploadId } = await this.getMetadata(upload.id)
await this.client.putObject({
Bucket: this.bucket,
Key: this.infoKey(upload.id),
Body: JSON.stringify(upload),
Tagging: this.useCompleteTag('true'),
Metadata: {
'upload-id': uploadId,
'tus-version': TUS_RESUMABLE,
},
})
}
/**
* Retrieves upload metadata previously saved in `${file_id}.info`.
* There's a small and simple caching mechanism to avoid multiple
* HTTP calls to S3.
*/
private async getMetadata(id: string): Promise<MetadataValue> {
const cached = await this.cache.get(id)
if (cached) {
return cached
}
const { Metadata, Body } = await this.client.getObject({
Bucket: this.bucket,
Key: this.infoKey(id),
})
const file = JSON.parse((await Body?.transformToString()) as string)
const metadata: MetadataValue = {
'tus-version': Metadata?.['tus-version'] as string,
'upload-id': Metadata?.['upload-id'] as string,
file: new Upload({
id,
size: file.size ? Number.parseInt(file.size, 10) : undefined,
offset: Number.parseInt(file.offset, 10),
metadata: file.metadata,
creation_date: file.creation_date,
storage: file.storage,
}),
}
await this.cache.set(id, metadata)
return metadata
}
private infoKey(id: string) {
return `${id}.info`
}
private partKey(id: string, isIncomplete = false) {
if (isIncomplete) {
id += '.part'
}
// TODO: introduce ObjectPrefixing for parts and incomplete parts.
// ObjectPrefix is prepended to the name of each S3 object that is created
// to store uploaded files. It can be used to create a pseudo-directory
// structure in the bucket, e.g. "path/to/my/uploads".
return id
}
private async uploadPart(
metadata: MetadataValue,
readStream: fs.ReadStream | Readable,
partNumber: number
): Promise<string> {
const data = await this.client.uploadPart({
Bucket: this.bucket,
Key: metadata.file.id,
UploadId: metadata['upload-id'],
PartNumber: partNumber,
Body: readStream,
})
log(`[${metadata.file.id}] finished uploading part #${partNumber}`)
return data.ETag as string
}
private async uploadIncompletePart(
id: string,
readStream: fs.ReadStream | Readable
): Promise<string> {
const data = await this.client.putObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
Body: readStream,
Tagging: this.useCompleteTag('false'),
})
log(`[${id}] finished uploading incomplete part`)
return data.ETag as string
}
private async downloadIncompletePart(id: string) {
const incompletePart = await this.getIncompletePart(id)
if (!incompletePart) {
return
}
const filePath = await this.uniqueTmpFileName('tus-s3-incomplete-part-')
try {
let incompletePartSize = 0
const byteCounterTransform = new stream.Transform({
transform(chunk, _, callback) {
incompletePartSize += chunk.length
callback(null, chunk)
},
})
// write to temporary file
await streamProm.pipeline(
incompletePart,
byteCounterTransform,
fs.createWriteStream(filePath)
)
const createReadStream = (options: { cleanUpOnEnd: boolean }) => {
const fileReader = fs.createReadStream(filePath)
if (options.cleanUpOnEnd) {
fileReader.on('end', () => {
fs.unlink(filePath, () => {
// ignore
})
})
fileReader.on('error', (err) => {
fileReader.destroy(err)
fs.unlink(filePath, () => {
// ignore
})
})
}
return fileReader
}
return {
size: incompletePartSize,
path: filePath,
createReader: createReadStream,
}
} catch (err) {
fsProm.rm(filePath).catch(() => {
/* ignore */
})
throw err
}
}
private async getIncompletePart(id: string): Promise<Readable | undefined> {
try {
const data = await this.client.getObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
})
return data.Body as Readable
} catch (error) {
if (error instanceof NoSuchKey) {
return undefined
}
throw error
}
}
private async getIncompletePartSize(id: string): Promise<number | undefined> {
try {
const data = await this.client.headObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
})
return data.ContentLength
} catch (error) {
if (error instanceof NotFound) {
return undefined
}
throw error
}
}
private async deleteIncompletePart(id: string): Promise<void> {
await this.client.deleteObject({
Bucket: this.bucket,
Key: this.partKey(id, true),
})
}
/**
* Uploads a stream to S3 using multiple parts
*/
private async uploadParts(
metadata: MetadataValue,
readStream: stream.Readable,
currentPartNumber: number,
offset: number
): Promise<number> {
const size = metadata.file.size
const promises: Promise<void>[] = []
let pendingChunkFilepath: string | null = null
let bytesUploaded = 0
let permit: Permit | undefined = undefined
const splitterStream = new StreamSplitter({
chunkSize: this.calcOptimalPartSize(size),
directory: os.tmpdir(),
})
.on('beforeChunkStarted', async () => {
permit = await this.partUploadSemaphore.acquire()
})
.on('chunkStarted', (filepath) => {
pendingChunkFilepath = filepath
})
.on('chunkFinished', ({ path, size: partSize }) => {
pendingChunkFilepath = null
const acquiredPermit = permit
const partNumber = currentPartNumber++
offset += partSize
const isFinalPart = size === offset
// biome-ignore lint/suspicious/noAsyncPromiseExecutor: it's fine
const deferred = new Promise<void>(async (resolve, reject) => {
try {
// Only the first chunk of each PATCH request can prepend
// an incomplete part (last chunk) from the previous request.
const readable = fs.createReadStream(path)
readable.on('error', reject)
if (partSize >= this.minPartSize || isFinalPart) {
await this.uploadPart(metadata, readable, partNumber)
} else {
await this.uploadIncompletePart(metadata.file.id, readable)
}
bytesUploaded += partSize
resolve()
} catch (error) {
reject(error)
} finally {
fsProm.rm(path).catch(() => {
/* ignore */
})
acquiredPermit?.release()
}
})
promises.push(deferred)
})
.on('chunkError', () => {
permit?.release()
})
try {
await streamProm.pipeline(readStream, splitterStream)
} catch (error) {
if (pendingChunkFilepath !== null) {
try {
await fsProm.rm(pendingChunkFilepath)
} catch {
log(`[${metadata.file.id}] failed to remove chunk ${pendingChunkFilepath}`)
}
}
promises.push(Promise.reject(error))
} finally {
await Promise.all(promises)
}
return bytesUploaded
}
/**
* Completes a multipart upload on S3.
* This is where S3 concatenates all the uploaded parts.
*/
private async finishMultipartUpload(metadata: MetadataValue, parts: Array<AWS.Part>) {
const response = await this.client.completeMultipartUpload({
Bucket: this.bucket,
Key: metadata.file.id,
UploadId: metadata['upload-id'],
MultipartUpload: {
Parts: parts.map((part) => {
return {
ETag: part.ETag,
PartNumber: part.PartNumber,
}
}),
},
})
return response.Location
}
/**
* Gets the number of complete parts/chunks already uploaded to S3.
* Retrieves only consecutive parts.
*/
private async retrieveParts(
id: string,
partNumberMarker?: string
): Promise<Array<AWS.Part>> {
const metadata = await this.getMetadata(id)
const params: AWS.ListPartsCommandInput = {
Bucket: this.bucket,
Key: id,
UploadId: metadata['upload-id'],
PartNumberMarker: partNumberMarker,
}
const data = await this.client.listParts(params)
let parts = data.Parts ?? []
if (data.IsTruncated) {
const rest = await this.retrieveParts(id, data.NextPartNumberMarker)
parts = [...parts, ...rest]
}
if (!partNumberMarker) {
// biome-ignore lint/style/noNonNullAssertion: it's fine
parts.sort((a, b) => a.PartNumber! - b.PartNumber!)
}
return parts
}
/**
* Removes cached data for a given file.
*/
private async clearCache(id: string) {
log(`[${id}] removing cached data`)
await this.cache.delete(id)
}
private calcOptimalPartSize(size?: number): number {
// When the upload size is not known we assume the largest possible value (`maxUploadSize`)
if (size === undefined) {
size = this.maxUploadSize
}
let optimalPartSize: number
// When the upload is smaller than or equal to PreferredPartSize, we upload it in a single part.
if (size <= this.preferredPartSize) {
optimalPartSize = size
}
// Check whether the upload fits into at most MaxMultipartParts parts of PreferredPartSize each.
else if (size <= this.preferredPartSize * this.maxMultipartParts) {
optimalPartSize = this.preferredPartSize
// The upload is too big for the preferred size.
// We divide the size by the maximum number of parts and round up.
} else {
optimalPartSize = Math.ceil(size / this.maxMultipartParts)
}
return optimalPartSize
}
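// Worked example (illustrative only; assumes the upstream tus-node-server
// defaults of preferredPartSize = 8 MiB and the S3 limit of 10,000 parts):
//   size = 4 MiB   -> one 4 MiB part (size <= preferredPartSize)
//   size = 20 GiB  -> 8 MiB parts (fits within 10,000 parts)
//   size = 100 GiB -> ceil(size / 10,000), roughly 10.74 MB parts, because
//                     10,000 parts of 8 MiB (about 78 GiB) would be too small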
/**
* Creates a multipart upload on S3 attaching any metadata to it.
* Also, a `${file_id}.info` file is created which holds some information
* about the upload itself like: `upload-id`, `upload-length`, etc.
*/
public async create(upload: Upload) {
log(`[${upload.id}] initializing multipart upload`)
const request: AWS.CreateMultipartUploadCommandInput = {
Bucket: this.bucket,
Key: upload.id,
Metadata: { 'tus-version': TUS_RESUMABLE },
}
if (upload.metadata?.contentType) {
request.ContentType = upload.metadata.contentType
}
if (upload.metadata?.cacheControl) {
request.CacheControl = upload.metadata.cacheControl
}
upload.creation_date = new Date().toISOString()
const res = await this.client.createMultipartUpload(request)
upload.storage = {
type: 's3',
path: res.Key as string,
bucket: this.bucket,
}
await this.saveMetadata(upload, res.UploadId as string)
log(`[${upload.id}] multipart upload created (${res.UploadId})`)
return upload
}
async read(id: string) {
const data = await this.client.getObject({
Bucket: this.bucket,
Key: id,
})
return data.Body as Readable
}
/**
* Write to the file, starting at the provided offset
*/
public async write(src: stream.Readable, id: string, offset: number): Promise<number> {
// Metadata request needs to happen first
const metadata = await this.getMetadata(id)
const parts = await this.retrieveParts(id)
// biome-ignore lint/style/noNonNullAssertion: it's fine
const partNumber: number = parts.length > 0 ? parts[parts.length - 1].PartNumber! : 0
const nextPartNumber = partNumber + 1
const incompletePart = await this.downloadIncompletePart(id)
const requestedOffset = offset
if (incompletePart) {
// once the file is on disk, we delete the incomplete part
await this.deleteIncompletePart(id)
offset = requestedOffset - incompletePart.size
src = new MultiStream([incompletePart.createReader({ cleanUpOnEnd: true }), src])
}
const bytesUploaded = await this.uploadParts(metadata, src, nextPartNumber, offset)
// The size of the incomplete part should not be counted, because the
// process of the incomplete part should be fully transparent to the user.
const newOffset = requestedOffset + bytesUploaded - (incompletePart?.size ?? 0)
if (metadata.file.size === newOffset) {
try {
const parts = await this.retrieveParts(id)
await this.finishMultipartUpload(metadata, parts)
await this.completeMetadata(metadata.file)
await this.clearCache(id)
} catch (error) {
log(`[${id}] failed to finish upload`, error)
throw error
}
}
return newOffset
}
public async getUpload(id: string): Promise<Upload> {
let metadata: MetadataValue
try {
metadata = await this.getMetadata(id)
} catch (error) {
log('getUpload: No file found.', error)
throw ERRORS.FILE_NOT_FOUND
}
let offset = 0
try {
const parts = await this.retrieveParts(id)
offset = calcOffsetFromParts(parts)
} catch (error: any) {
// Check if the error is caused by the upload not being found. This happens
// when the multipart upload has already been completed or aborted. Since
// we already found the info object, we know that the upload has been
// completed and therefore can ensure the offset equals the size.
// AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
// Spaces, can also return NoSuchKey.
if (error.Code === 'NoSuchUpload' || error.Code === 'NoSuchKey') {
return new Upload({
...metadata.file,
offset: metadata.file.size as number,
size: metadata.file.size,
metadata: metadata.file.metadata,
storage: metadata.file.storage,
})
}
log(error)
throw error
}
const incompletePartSize = await this.getIncompletePartSize(id)
return new Upload({
...metadata.file,
offset: offset + (incompletePartSize ?? 0),
size: metadata.file.size,
storage: metadata.file.storage,
})
}
public async declareUploadLength(file_id: string, upload_length: number) {
const { file, 'upload-id': uploadId } = await this.getMetadata(file_id)
if (!file) {
throw ERRORS.FILE_NOT_FOUND
}
file.size = upload_length
await this.saveMetadata(file, uploadId)
}
public async remove(id: string): Promise<void> {
try {
const { 'upload-id': uploadId } = await this.getMetadata(id)
if (uploadId) {
await this.client.abortMultipartUpload({
Bucket: this.bucket,
Key: id,
UploadId: uploadId,
})
}
} catch (error: any) {
if (error?.Code && ['NotFound', 'NoSuchKey', 'NoSuchUpload'].includes(error.Code)) {
log('remove: No file found.', error)
throw ERRORS.FILE_NOT_FOUND
}
throw error
}
await this.client.deleteObjects({
Bucket: this.bucket,
Delete: {
Objects: [{ Key: id }, { Key: this.infoKey(id) }],
},
})
await this.clearCache(id)
}
protected getExpirationDate(created_at: string) {
const date = new Date(created_at)
return new Date(date.getTime() + this.getExpiration())
}
getExpiration(): number {
return this.expirationPeriodInMilliseconds
}
async deleteExpired(): Promise<number> {
if (this.getExpiration() === 0) {
return 0
}
let keyMarker: string | undefined = undefined
let uploadIdMarker: string | undefined = undefined
let isTruncated = true
let deleted = 0
while (isTruncated) {
const listResponse: AWS.ListMultipartUploadsCommandOutput =
await this.client.listMultipartUploads({
Bucket: this.bucket,
KeyMarker: keyMarker,
UploadIdMarker: uploadIdMarker,
})
const expiredUploads =
listResponse.Uploads?.filter((multiPartUpload) => {
const initiatedDate = multiPartUpload.Initiated
return (
initiatedDate &&
new Date().getTime() >
this.getExpirationDate(initiatedDate.toISOString()).getTime()
)
}) || []
const objectsToDelete = expiredUploads.reduce(
(all, expiredUpload) => {
all.push(
{
key: this.infoKey(expiredUpload.Key as string),
},
{
key: this.partKey(expiredUpload.Key as string, true),
}
)
return all
},
[] as { key: string }[]
)
const deletions: Promise<AWS.DeleteObjectsCommandOutput>[] = []
// Batch delete 1000 items at a time
while (objectsToDelete.length > 0) {
const objects = objectsToDelete.splice(0, 1000)
deletions.push(
this.client.deleteObjects({
Bucket: this.bucket,
Delete: {
Objects: objects.map((object) => ({
Key: object.key,
})),
},
})
)
}
const [objectsDeleted] = await Promise.all([
Promise.all(deletions),
...expiredUploads.map((expiredUpload) => {
return this.client.abortMultipartUpload({
Bucket: this.bucket,
Key: expiredUpload.Key,
UploadId: expiredUpload.UploadId,
})
}),
])
deleted += objectsDeleted.reduce((all, acc) => all + (acc.Deleted?.length ?? 0), 0)
isTruncated = Boolean(listResponse.IsTruncated)
if (isTruncated) {
keyMarker = listResponse.NextKeyMarker
uploadIdMarker = listResponse.NextUploadIdMarker
}
}
return deleted
}
private async uniqueTmpFileName(template: string): Promise<string> {
let tries = 0
const maxTries = 10
while (tries < maxTries) {
const fileName =
template + crypto.randomBytes(10).toString('base64url').slice(0, 10)
const filePath = path.join(os.tmpdir(), fileName)
try {
await fsProm.lstat(filePath)
// If no error, file exists, so try again
tries++
} catch (e: any) {
if (e.code === 'ENOENT') {
// File does not exist, return the path
return filePath
}
throw e // For other errors, rethrow
}
}
throw new Error(`Could not find a unique file name after ${maxTries} tries`)
}
}
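For orientation, a minimal construction sketch follows. It is hypothetical: it assumes this vendored store keeps the upstream tus-node-server option shape (partSize, cache, s3ClientConfig with an added bucket field); the bucket, region, and credentials below are placeholders.

// Hypothetical usage sketch; option names follow the upstream tus-node-server S3Store.
const store = new S3Store({
  partSize: 8 * 1024 * 1024, // preferred part size: 8 MiB
  cache: new MemoryKvStore(), // caches `${id}.info` metadata lookups
  s3ClientConfig: {
    bucket: 'my-uploads', // placeholder bucket name
    region: 'us-east-1', // placeholder region
    credentials: { accessKeyId: 'key', secretAccessKey: 'secret' }, // placeholders
  },
})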

packages/tus/src/types.ts Normal file
View File

@ -0,0 +1,211 @@
/**
 * @file tus protocol server-side type definitions
 * @description Types and interfaces required by the tus file upload server
 * @version 1.0.0
 */
import type http from 'node:http'
import { Locker, Upload } from './utils'
/**
 * tus server configuration options
 * @interface ServerOptions
 * @description All options accepted by the tus server
 */
export type ServerOptions = {
/**
 * The route to accept requests on.
 * @example '/files'
 */
path: string
/**
 * Maximum upload size, as a number of bytes or a function resolved per request.
 * @param req The HTTP request object
 * @param uploadId The upload ID, when known
 * @returns The maximum allowed size in bytes
 */
maxSize?:
| number
| ((req: http.IncomingMessage, uploadId: string | null) => Promise<number> | number)
/**
 * Return a relative URL as the Location response header.
 * @description When true, a relative URL is returned instead of an absolute one
 */
relativeLocation?: boolean
/**
 * Trust proxy headers when building the Location header.
 * @description Uses the Forwarded, X-Forwarded-Proto and X-Forwarded-Host headers
 * to override the protocol and host in the Location header
 */
respectForwardedHeaders?: boolean
/**
 * Additional custom request headers allowed for CORS.
 * @description Appended to the Access-Control-Allow-Headers response header
 */
allowedHeaders?: string[]
/**
 * Whether credentials are allowed in CORS requests.
 * @description Sets the Access-Control-Allow-Credentials response header
 */
allowedCredentials?: boolean
/**
 * List of origins allowed for CORS.
 * @description Used in the Access-Control-Allow-Origin response header
 */
allowedOrigins?: string[]
/**
 * Interval (in milliseconds) between upload progress events.
 * @description How often the EVENTS.POST_RECEIVE_V2 event is emitted with upload progress
 */
postReceiveInterval?: number
/**
 * Custom upload URL generation logic.
 * @param req The HTTP request object
 * @param options URL generation options
 * @returns The generated URL
 */
generateUrl?: (
req: http.IncomingMessage,
options: { proto: string; host: string; path: string; id: string }
) => string
/**
 * Custom logic for extracting the file ID from a request.
 * @param req The HTTP request object
 * @param lastPath The last segment of the request URL
 * @returns The extracted file ID
 */
getFileIdFromRequest?: (
req: http.IncomingMessage,
lastPath?: string
) => string | undefined
/**
 * Custom file naming function.
 * @description Defaults to crypto.randomBytes(16).toString('hex')
 * @param req The HTTP request object
 * @param metadata The parsed upload metadata
 * @returns The generated file name
 */
namingFunction?: (
req: http.IncomingMessage,
metadata?: Record<string, string | null>
) => string | Promise<string>
/**
 * Locker implementation guaranteeing exclusive access to uploads.
 * @description Prevents concurrent requests from writing to the same upload
 */
locker:
| Locker
| Promise<Locker>
| ((req: http.IncomingMessage) => Locker | Promise<Locker>)
/**
 * Lock drain timeout (in milliseconds).
 * @description How long to wait for an ongoing request to release its lock
 */
lockDrainTimeout?: number
/**
 * Disallow termination of finished uploads.
 * @description When true, fully uploaded files can no longer be deleted
 */
disableTerminationForFinishedUploads?: boolean
/**
 * Callback invoked before an upload is created.
 * @description Can be used to validate or reject an upload; returned metadata overrides the original
 * @param req The HTTP request object
 * @param res The HTTP response object
 * @param upload The upload about to be created
 * @throws To reject the upload
 */
onUploadCreate?: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => Promise<
http.ServerResponse | { res: http.ServerResponse; metadata?: Upload['metadata'] }
>
/**
 * Callback invoked when an upload has finished.
 * @description Can be used to customize the final response (status code, headers, body)
 * @param req The HTTP request object
 * @param res The HTTP response object
 * @param upload The finished upload
 * @throws To fail the request
 */
onUploadFinish?: (
req: http.IncomingMessage,
res: http.ServerResponse,
upload: Upload
) => Promise<
| http.ServerResponse
| {
res: http.ServerResponse
status_code?: number
headers?: Record<string, string | number>
body?: string
}
>
/**
 * Callback invoked for every incoming request.
 * @description Useful for authentication or request-level checks
 * @param req The HTTP request object
 * @param res The HTTP response object
 * @param uploadId The upload ID
 */
onIncomingRequest?: (
req: http.IncomingMessage,
res: http.ServerResponse,
uploadId: string
) => Promise<void>
/**
 * Callback invoked when a response error occurs.
 * @description Can be used to map internal errors to custom responses
 * @param req The HTTP request object
 * @param res The HTTP response object
 * @param err The error that occurred
 */
onResponseError?: (
req: http.IncomingMessage,
res: http.ServerResponse,
err: Error | { status_code: number; body: string }
) =>
| Promise<{ status_code: number; body: string } | undefined>
| { status_code: number; body: string }
| undefined
}
/**
 * Route handler
 * @description Type of an HTTP request handler function
 */
export type RouteHandler = (req: http.IncomingMessage, res: http.ServerResponse) => void
/**
 * Utility type: make the given properties optional
 * @template T The base type
 * @template K The keys to make optional
 */
export type WithOptional<T, K extends keyof T> = Omit<T, K> & { [P in K]+?: T[P] }
/**
 * Utility type: make the given properties required
 * @template T The base type
 * @template K The keys to make required
 */
export type WithRequired<T, K extends keyof T> = T & { [P in K]-?: T[P] }
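A minimal options sketch, for illustration (values and handler bodies are placeholders, and `myLocker` stands in for any object implementing the Locker interface):

const options: ServerOptions = {
  path: '/files',
  maxSize: 1024 * 1024 * 1024, // cap uploads at 1 GiB
  relativeLocation: false,
  locker: myLocker, // assumed: any Locker implementation
  onUploadFinish: async (req, res, upload) => {
    console.log(`upload ${upload.id} finished (${upload.offset} bytes)`)
    return res
  },
}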

View File

@ -0,0 +1,132 @@
/**
 * Constants for the TUS protocol.
 * TUS is an HTTP-based protocol for resumable file uploads.
 */
// HTTP request methods supported by the TUS protocol
export const REQUEST_METHODS = ['POST', 'HEAD', 'PATCH', 'OPTIONS', 'DELETE'] as const
// HTTP headers used by the TUS protocol
export const HEADERS = [
'Authorization',
'Content-Type',
'Location',
'Tus-Extension',
'Tus-Max-Size',
'Tus-Resumable',
'Tus-Version',
'Upload-Concat',
'Upload-Defer-Length',
'Upload-Length',
'Upload-Metadata',
'Upload-Offset',
'X-HTTP-Method-Override',
'X-Requested-With',
'X-Forwarded-Host',
'X-Forwarded-Proto',
'Forwarded',
] as const
// Lowercased copies of the headers, for case-insensitive lookups
export const HEADERS_LOWERCASE = HEADERS.map((header) => {
return header.toLowerCase()
}) as Array<Lowercase<(typeof HEADERS)[number]>>
// Allowed headers, allowed methods, and exposed headers
export const ALLOWED_HEADERS = HEADERS.join(', ')
export const ALLOWED_METHODS = REQUEST_METHODS.join(', ')
export const EXPOSED_HEADERS = HEADERS.join(', ')
// Error responses used by this TUS implementation
export const ERRORS = {
MISSING_OFFSET: {
status_code: 403,
body: 'Upload-Offset header required\n',
},
ABORTED: {
status_code: 400,
body: 'Request aborted due to lock acquired',
},
INVALID_TERMINATION: {
status_code: 400,
body: 'Cannot terminate an already completed upload',
},
ERR_LOCK_TIMEOUT: {
status_code: 500,
body: 'failed to acquire lock before timeout',
},
INVALID_CONTENT_TYPE: {
status_code: 403,
body: 'Content-Type header required\n',
},
FILE_NOT_FOUND: {
status_code: 404,
body: 'The file for this url was not found\n',
},
INVALID_OFFSET: {
status_code: 409,
body: 'Upload-Offset conflict\n',
},
FILE_NO_LONGER_EXISTS: {
status_code: 410,
body: 'The file for this url no longer exists\n',
},
ERR_SIZE_EXCEEDED: {
status_code: 413,
body: "upload's size exceeded\n",
},
ERR_MAX_SIZE_EXCEEDED: {
status_code: 413,
body: 'Maximum size exceeded\n',
},
INVALID_LENGTH: {
status_code: 400,
body: 'Upload-Length or Upload-Defer-Length header required\n',
},
INVALID_METADATA: {
status_code: 400,
body: 'Upload-Metadata is invalid. It MUST consist of one or more comma-separated key-value pairs. The key and value MUST be separated by a space. The key MUST NOT contain spaces and commas and MUST NOT be empty. The key SHOULD be ASCII encoded and the value MUST be Base64 encoded. All keys MUST be unique',
},
UNKNOWN_ERROR: {
status_code: 500,
body: 'Something went wrong with that request\n',
},
FILE_WRITE_ERROR: {
status_code: 500,
body: 'Something went wrong receiving the file\n',
},
UNSUPPORTED_CONCATENATION_EXTENSION: {
status_code: 501,
body: 'Concatenation extension is not (yet) supported. Disable parallel uploads in the tus client.\n',
},
UNSUPPORTED_CREATION_DEFER_LENGTH_EXTENSION: {
status_code: 501,
body: 'creation-defer-length extension is not (yet) supported.\n',
},
UNSUPPORTED_EXPIRATION_EXTENSION: {
status_code: 501,
body: 'expiration extension is not (yet) supported.\n',
},
} as const
// Event types emitted by the TUS server
export const POST_CREATE = 'POST_CREATE' as const
/** @deprecated this is almost the same as POST_FINISH, use POST_RECEIVE_V2 instead */
export const POST_RECEIVE = 'POST_RECEIVE' as const
export const POST_RECEIVE_V2 = 'POST_RECEIVE_V2' as const
export const POST_FINISH = 'POST_FINISH' as const
export const POST_TERMINATE = 'POST_TERMINATE' as const
export const EVENTS = {
POST_CREATE,
/** @deprecated this is almost the same as POST_FINISH, use POST_RECEIVE_V2 instead */
POST_RECEIVE,
POST_RECEIVE_V2,
POST_FINISH,
POST_TERMINATE,
} as const
// CORS max age and TUS protocol version constants
export const MAX_AGE = 86_400 as const
export const TUS_RESUMABLE = '1.0.0' as const
export const TUS_VERSION = ['1.0.0'] as const

View File

@ -0,0 +1,3 @@
export * from './models'
export * from './constants'
export * from './kvstores'

View File

@ -0,0 +1,94 @@
import fs from 'node:fs/promises'
import path from 'node:path'
import type {KvStore} from './Types'
import type {Upload} from '../models'
/**
 * FileKvStore
 *
 * @description File-based key-value store
 * @remarks
 * - Persists JSON metadata on disk, one file per key
 * - Intended for use alongside the file-based upload store
 *
 * @typeparam T The stored value type, defaults to Upload
 */
export class FileKvStore<T = Upload> implements KvStore<T> {
/** Directory in which values are stored */
directory: string
/**
 * Create a new FileKvStore
 *
 * @param path Directory used to store the metadata files
 */
constructor(path: string) {
this.directory = path
}
/**
 * Get the value for a key
 *
 * @param key The key to look up
 * @returns The stored value, or undefined when missing or unreadable
 */
async get(key: string): Promise<T | undefined> {
try {
// Read the JSON file for this key
const buffer = await fs.readFile(this.resolve(key), 'utf8')
// Parse and return the JSON contents
return JSON.parse(buffer as string)
} catch {
// Return undefined when the file is missing or cannot be read
return undefined
}
}
/**
 * Set the value for a key
 * @param key The key to write
 * @param value The value to store
 */
async set(key: string, value: T): Promise<void> {
// Serialize the value to JSON and write it to disk
await fs.writeFile(this.resolve(key), JSON.stringify(value))
}
/**
 * Delete a key
 *
 * @param key The key to remove
 */
async delete(key: string): Promise<void> {
// Remove the JSON file for this key
await fs.rm(this.resolve(key))
}
/**
 * List all keys in the store
 *
 * @returns The IDs of all stored uploads
 */
async list(): Promise<Array<string>> {
// Read every file in the directory
const files = await fs.readdir(this.directory)
// Sort the file names
const sorted = files.sort((a, b) => a.localeCompare(b))
// Strip the .json extension when present
const name = (file: string) => path.basename(file, '.json')
// Keep only valid tus file IDs:
// IDs whose files come in pairs (the upload file plus its .json metadata file)
return sorted.filter(
(file, idx) => idx < sorted.length - 1 && name(file) === name(sorted[idx + 1])
)
}
/**
 * Resolve a key to its file path
 *
 * @param key The key to resolve
 * @returns The absolute path of the JSON file for this key
 * @private
 */
private resolve(key: string): string {
// Map the key to its full JSON file path
return path.resolve(this.directory, `${key}.json`)
}
}
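To illustrate the pairing rule in list(), a short sketch (the directory contents are hypothetical):

// Given a directory containing 'abc', 'abc.json' and 'orphan.json',
// list() sorts the names and keeps only entries followed by their pair,
// so it resolves to ['abc']; 'orphan.json' has no matching upload file.
const kv = new FileKvStore('/tmp/uploads') // placeholder directory
const ids = await kv.list()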

View File

@ -0,0 +1,54 @@
import type {Redis as IoRedis} from 'ioredis'
import type {KvStore} from './Types'
import type {Upload} from '../models'
export class IoRedisKvStore<T = Upload> implements KvStore<T> {
constructor(
private redis: IoRedis,
private prefix = ''
) {
this.redis = redis
this.prefix = prefix
}
private prefixed(key: string): string {
return `${this.prefix}${key}`
}
async get(key: string): Promise<T | undefined> {
return this.deserializeValue(await this.redis.get(this.prefixed(key)))
}
async set(key: string, value: T): Promise<void> {
await this.redis.set(this.prefixed(key), this.serializeValue(value))
}
async delete(key: string): Promise<void> {
await this.redis.del(this.prefixed(key))
}
async list(): Promise<Array<string>> {
const keys = new Set<string>()
let cursor = '0'
do {
const [next, batch] = await this.redis.scan(
cursor,
'MATCH',
this.prefixed('*'),
'COUNT',
'20'
)
cursor = next
for (const key of batch) keys.add(key)
} while (cursor !== '0')
return Array.from(keys)
}
private serializeValue(value: T): string {
return JSON.stringify(value)
}
private deserializeValue(buffer: string | null): T | undefined {
return buffer ? JSON.parse(buffer) : undefined
}
}
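A short usage sketch (the connection string and prefix are placeholders):

import Redis from 'ioredis'
const kv = new IoRedisKvStore(new Redis('redis://localhost:6379'), 'tus:')
await kv.set('abc', upload) // 'upload' is an Upload instance; stored under the Redis key 'tus:abc'
const found = await kv.get('abc') // round-trips through JSON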

View File

@ -0,0 +1,26 @@
import type {Upload} from '../models'
import type {KvStore} from './Types'
/**
* Memory based configstore.
* Used mostly for unit tests.
*/
export class MemoryKvStore<T = Upload> implements KvStore<T> {
data: Map<string, T> = new Map()
async get(key: string): Promise<T | undefined> {
return this.data.get(key)
}
async set(key: string, value: T): Promise<void> {
this.data.set(key, value)
}
async delete(key: string): Promise<void> {
this.data.delete(key)
}
async list(): Promise<Array<string>> {
return [...this.data.keys()]
}
}

View File

@ -0,0 +1,94 @@
import type { RedisClientType } from '@redis/client'
import type { KvStore } from './Types'
import type { Upload } from '../models'
/**
 * Redis-based key-value store
 * A KvStore implementation backed by Redis
 *
 * Notes:
 * - Values are serialized to JSON strings
 * - An optional key prefix namespaces all entries
 *
 * @author Mitja Puzigaća <mitjap@gmail.com>
 */
export class RedisKvStore<T = Upload> implements KvStore<T> {
/**
 * Create a new RedisKvStore
 *
 * @param redis A connected Redis client
 * @param prefix Optional prefix prepended to every key
 */
constructor(
private redis: RedisClientType,
private prefix = ''
) {
this.redis = redis
this.prefix = prefix
}
/**
 * Get the value for a key
 *
 * @param key The key to look up
 * @returns The stored value, or undefined if not found
 */
async get(key: string): Promise<T | undefined> {
return this.deserializeValue(await this.redis.get(this.prefix + key))
}
/**
 * Set the value for a key
 *
 * @param key The key to write
 * @param value The value to store
 */
async set(key: string, value: T): Promise<void> {
await this.redis.set(this.prefix + key, this.serializeValue(value))
}
/**
 * Delete a key
 *
 * @param key The key to remove
 */
async delete(key: string): Promise<void> {
await this.redis.del(this.prefix + key)
}
/**
 * List all keys in the store
 *
 * @returns All keys matching the configured prefix
 */
async list(): Promise<Array<string>> {
const keys = new Set<string>()
let cursor = 0
do {
const result = await this.redis.scan(cursor, { MATCH: `${this.prefix}*`, COUNT: 20 })
cursor = result.cursor
for (const key of result.keys) keys.add(key)
} while (cursor !== 0)
return Array.from(keys)
}
/**
 * Serialize a value to a JSON string
 *
 * @param value The value to serialize
 * @returns The JSON representation
 */
private serializeValue(value: T): string {
return JSON.stringify(value)
}
/**
 * Deserialize a JSON string back into a value
 *
 * @param buffer The raw string from Redis, or null
 * @returns The parsed value, or undefined when null
 */
private deserializeValue(buffer: string | null): T | undefined {
return buffer ? JSON.parse(buffer) : undefined
}
}

View File

@ -0,0 +1,43 @@
/**
 * Key-value store abstraction
 * @description Common interface for the metadata stores used by the tus server
 * @module KvStore
 * @remarks Implementations include memory, file, and Redis-backed stores
 */
import type { Upload } from '../models'
/**
 * Key-value store interface
 * @template T The stored value type, defaults to Upload
 * @description Minimal get/set/delete contract with an optional list operation
 * @interface
 */
export interface KvStore<T = Upload> {
/**
 * Get the value for a key
 * @param key The key to look up
 * @returns The stored value, or undefined if not found
 */
get(key: string): Promise<T | undefined>
/**
 * Set the value for a key
 * @param key The key to write
 * @param value The value to store
 * @returns Resolves when the write completes
 */
set(key: string, value: T): Promise<void>
/**
 * Delete a key
 * @param key The key to remove
 * @returns Resolves when the key is deleted
 */
delete(key: string): Promise<void>
/**
 * List all keys in the store (optional)
 * @returns All stored keys
 */
list?(): Promise<Array<string>>
}

View File

@ -0,0 +1,5 @@
export { FileKvStore } from './FileKvStore'
export { MemoryKvStore } from './MemoryKvStore'
export { RedisKvStore } from './RedisKvStore'
export { IoRedisKvStore } from './IoRedisKvStore'
export type { KvStore } from './Types'

View File

@ -0,0 +1,14 @@
/**
 * CancellationContext
 *
 * Carries the cancellation state of a request.
 *
 * Usage:
 * - `signal` is observed by long-running reads/writes
 * - `abort` terminates the operation immediately
 * - `cancel` requests a graceful termination
 */
export interface CancellationContext {
signal: AbortSignal
abort: () => void
cancel: () => void
}

View File

@ -0,0 +1,72 @@
import EventEmitter from 'node:events'
import {Upload} from './Upload'
import type stream from 'node:stream'
import type http from 'node:http'
export class DataStore extends EventEmitter {
extensions: string[] = []
hasExtension(extension: string) {
return this.extensions?.includes(extension)
}
/**
* Called in POST requests. This method just creates a
* file, implementing the creation extension.
*
* http://tus.io/protocols/resumable-upload.html#creation
*/
async create(file: Upload) {
return file
}
/**
* Called in DELETE requests. This method just deletes the file from the store.
* http://tus.io/protocols/resumable-upload.html#termination
*/
async remove(id: string) {}
/**
* Called in PATCH requests. This method should write data
* to the DataStore file, and possibly implement the
* concatenation extension.
*
* http://tus.io/protocols/resumable-upload.html#concatenation
*/
async write(
stream: http.IncomingMessage | stream.Readable,
id: string,
offset: number
) {
return 0
}
/**
* Called in HEAD requests. This method should return the bytes
* written to the DataStore, for the client to know where to resume
* the upload.
*/
async getUpload(id: string): Promise<Upload> {
return new Upload({
id,
size: 0,
offset: 0,
storage: {type: 'datastore', path: ''},
})
}
/**
 * Called in PATCH requests when the upload length is known after being deferred.
 */
async declareUploadLength(id: string, upload_length: number) {}
/**
* Returns number of expired uploads that were deleted.
*/
async deleteExpired(): Promise<number> {
return 0
}
getExpiration(): number {
return 0
}
}

View File

@ -0,0 +1,12 @@
export type RequestRelease = () => Promise<void> | void
/** Creates locks that guarantee exclusive access to an upload */
export interface Locker {
newLock(id: string): Lock
}
/**
 * Exclusive lock for a single upload.
 * `cancelReq` is invoked when another request wants the lock,
 * giving the current holder a chance to finish and release it.
 */
export interface Lock {
lock(cancelReq: RequestRelease): Promise<void>
unlock(): Promise<void>
}

View File

@ -0,0 +1,103 @@
import type {Upload} from './Upload'
// Code points for the ASCII space and comma characters
const ASCII_SPACE = ' '.codePointAt(0)
const ASCII_COMMA = ','.codePointAt(0)
// Regular expression used to validate Base64 strings
const BASE64_REGEX = /^[\d+/A-Za-z]*={0,2}$/
/**
 * Validate a metadata key
 * @param key The key to validate
 * @returns true when the key is valid, false otherwise
 */
export function validateKey(key: string) {
// An empty key is invalid
if (key.length === 0) {
return false
}
// Check every character's code point against the allowed range
for (let i = 0; i < key.length; ++i) {
const charCodePoint = key.codePointAt(i) as number
if (
charCodePoint > 127 || // non-ASCII character
charCodePoint === ASCII_SPACE || // space character
charCodePoint === ASCII_COMMA // comma character
) {
return false
}
}
return true
}
/**
 * Validate a metadata value
 * @param value The value to validate
 * @returns true when the value is a valid Base64 string, false otherwise
 */
export function validateValue(value: string) {
// A Base64 string's length must be a multiple of 4
if (value.length % 4 !== 0) {
return false
}
// Check the Base64 format with the regular expression
return BASE64_REGEX.test(value)
}
/**
 * Parse an Upload-Metadata header string into an object
 * @param str The raw header value
 * @returns The parsed metadata object
 * @throws When the metadata string is invalid
 */
export function parse(str?: string) {
const meta: Record<string, string | null> = {}
// An empty or whitespace-only string is invalid
if (!str || str.trim().length === 0) {
throw new Error('Metadata string is not valid')
}
// Walk every comma-separated key-value pair
for (const pair of str.split(',')) {
const tokens = pair.split(' ')
const [key, value] = tokens
// Validate the key and value, and ensure the key is not a duplicate
if (
((tokens.length === 1 && validateKey(key)) ||
(tokens.length === 2 && validateKey(key) && validateValue(value))) &&
!(key in meta)
) {
// Decode the value from Base64 to a UTF-8 string when present
const decodedValue = value ? Buffer.from(value, 'base64').toString('utf8') : null
meta[key] = decodedValue
} else {
throw new Error('Metadata string is not valid')
}
}
return meta
}
/**
 * Serialize a metadata object into an Upload-Metadata header string
 * @param metadata The metadata object
 * @returns The encoded header value
 */
export function stringify(metadata: NonNullable<Upload['metadata']>): string {
return Object.entries(metadata)
.map(([key, value]) => {
// A null value serializes to the bare key
if (value === null) {
return key
}
// Encode the value as Base64 and join it with the key
const encodedValue = Buffer.from(value, 'utf8').toString('base64')
return `${key} ${encodedValue}`
})
.join(',')
}
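A round-trip example of this encoding (the values are illustrative):

// 'ZXhhbXBsZS50eHQ=' is Base64 for 'example.txt'; a bare key maps to null.
const meta = parse('filename ZXhhbXBsZS50eHQ=,is_confidential')
// -> { filename: 'example.txt', is_confidential: null }
stringify(meta as NonNullable<Upload['metadata']>)
// -> 'filename ZXhhbXBsZS50eHQ=,is_confidential'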

View File

@ -0,0 +1,54 @@
import { Transform, type TransformCallback } from 'node:stream'
import { ERRORS } from '../constants'
// TODO: create HttpError and use it everywhere instead of throwing objects
/**
 * MaxFileExceededError
 * Error thrown when a stream exceeds the maximum allowed size
 */
export class MaxFileExceededError extends Error {
status_code: number
body: string
constructor() {
super(ERRORS.ERR_MAX_SIZE_EXCEEDED.body)
this.status_code = ERRORS.ERR_MAX_SIZE_EXCEEDED.status_code
this.body = ERRORS.ERR_MAX_SIZE_EXCEEDED.body
Object.setPrototypeOf(this, MaxFileExceededError.prototype)
}
}
/**
 * StreamLimiter
 * Transform stream that errors once more than maxSize bytes have passed through
 */
export class StreamLimiter extends Transform {
private maxSize: number // maximum allowed stream size in bytes
private currentSize = 0 // number of bytes seen so far
/**
 * Create a StreamLimiter
 * @param maxSize The maximum number of bytes allowed through the stream
 */
constructor(maxSize: number) {
super()
this.maxSize = maxSize
}
/**
 * _transform implementation for the Transform stream.
 * Tracks the running byte count and fails with MaxFileExceededError once it exceeds maxSize.
 * @param chunk The incoming data chunk
 * @param encoding The chunk encoding (unused)
 * @param callback Completion callback
 */
_transform(chunk: Buffer, encoding: BufferEncoding, callback: TransformCallback): void {
this.currentSize += chunk.length // update the running byte count
if (this.currentSize > this.maxSize) {
callback(new MaxFileExceededError()) // over the limit: fail the stream
} else {
callback(null, chunk) // under the limit: pass the chunk through
}
}
}
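A usage sketch: cap a request body while piping it to disk (the source stream, limit, and path are placeholders):

import { pipeline } from 'node:stream/promises'
import fs from 'node:fs'
// The pipeline fails with MaxFileExceededError once more than 10 MiB pass through.
await pipeline(
  req, // e.g. an http.IncomingMessage
  new StreamLimiter(10 * 1024 * 1024),
  fs.createWriteStream('/tmp/upload.bin'),
)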

View File

@ -0,0 +1,183 @@
/* global BufferEncoding */
import crypto from 'node:crypto'
import fs from 'node:fs/promises'
import path from 'node:path'
import stream from 'node:stream'
/**
 * Generate a random string of the given length
 * @param size The desired length
 * @returns A URL-safe random string
 */
function randomString(size: number) {
return crypto.randomBytes(size).toString('base64url').slice(0, size)
}
/**
 * Options for StreamSplitter
 */
type Options = {
chunkSize: number // size of each chunk in bytes
directory: string // directory in which chunk files are stored
}
/**
 * Write-completion callback
 */
type Callback = (error: Error | null) => void
/**
 * StreamSplitter: a writable stream that splits incoming data into
 * fixed-size chunk files on disk
 */
export class StreamSplitter extends stream.Writable {
directory: Options['directory'] // directory in which chunks are stored
currentChunkPath: string | null // path of the current chunk file
currentChunkSize: number // size of the current chunk so far
fileHandle: fs.FileHandle | null // file handle of the current chunk
filenameTemplate: string // template for chunk file names
chunkSize: Options['chunkSize'] // target size of each chunk
part: number // index of the current chunk
/**
 * Create a StreamSplitter
 * @param chunkSize The target chunk size in bytes
 * @param directory The directory for chunk files
 * @param options Standard writable stream options
 */
constructor({ chunkSize, directory }: Options, options?: stream.WritableOptions) {
super(options)
this.chunkSize = chunkSize
this.currentChunkPath = null
this.currentChunkSize = 0
this.fileHandle = null
this.directory = directory
this.filenameTemplate = randomString(10)
this.part = 0
this.on('error', this._handleError.bind(this))
}
/**
 * Write incoming data, splitting it across chunk files
 * @param chunk The incoming data
 * @param _ The encoding (unused)
 * @param callback Completion callback
 */
async _write(chunk: Buffer, _: BufferEncoding, callback: Callback) {
try {
// Open a new chunk file when none is active
if (this.fileHandle === null) {
await this._newChunk()
}
let overflow = this.currentChunkSize + chunk.length - this.chunkSize
// If this write would push the current chunk past the target size, split it
while (overflow > 0) {
// Write only the part that fits into the current chunk
await this._writeChunk(chunk.subarray(0, chunk.length - overflow))
await this._finishChunk()
// Continue the remainder in a fresh chunk
await this._newChunk()
chunk = chunk.subarray(chunk.length - overflow, chunk.length)
overflow = this.currentChunkSize + chunk.length - this.chunkSize
}
// The remaining data fits in the current chunk: write it directly
await this._writeChunk(chunk)
callback(null)
} catch (error: any) {
callback(error)
}
}
/**
 * Flush the final chunk when the stream ends
 * @param callback Completion callback
 */
async _final(callback: Callback) {
if (this.fileHandle === null) {
callback(null)
return
}
try {
await this._finishChunk()
callback(null)
} catch (error: any) {
callback(error)
}
}
/**
 * Append a chunk of data to the current chunk file
 * @param chunk The data to append
 */
async _writeChunk(chunk: Buffer): Promise<void> {
await fs.appendFile(this.fileHandle as fs.FileHandle, chunk)
this.currentChunkSize += chunk.length
}
/**
 * Handle a stream error by closing and releasing the current chunk
 */
async _handleError() {
await this.emitEvent('chunkError', this.currentChunkPath)
// On error, stop writing to prevent data corruption
if (this.fileHandle === null) { return }
await this.fileHandle.close()
this.currentChunkPath = null
this.fileHandle = null
}
/**
 * Close the current chunk file and emit chunkFinished
 */
async _finishChunk(): Promise<void> {
if (this.fileHandle === null) {
return
}
await this.fileHandle.close()
await this.emitEvent('chunkFinished', {
path: this.currentChunkPath,
size: this.currentChunkSize,
})
this.currentChunkPath = null
this.fileHandle = null
this.currentChunkSize = 0
this.part += 1
}
/**
 * Emit an event and await all of its listeners sequentially
 * @param name The event name
 * @param payload The event payload
 */
async emitEvent<T>(name: string, payload: T) {
const listeners = this.listeners(name)
for (const listener of listeners) {
await listener(payload)
}
}
/**
 * Start a new chunk file and emit the chunk lifecycle events
 */
async _newChunk(): Promise<void> {
const currentChunkPath = path.join(
this.directory,
`${this.filenameTemplate}-${this.part}`
)
await this.emitEvent('beforeChunkStarted', currentChunkPath)
this.currentChunkPath = currentChunkPath
const fileHandle = await fs.open(this.currentChunkPath, 'w')
await this.emitEvent('chunkStarted', this.currentChunkPath)
this.currentChunkSize = 0
this.fileHandle = fileHandle
}
}
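A usage sketch mirroring how the S3 store drives the splitter (the chunk size and source stream are placeholders):

import os from 'node:os'
import streamProm from 'node:stream/promises'
const splitter = new StreamSplitter({
  chunkSize: 8 * 1024 * 1024, // emit 8 MiB chunk files
  directory: os.tmpdir(),
}).on('chunkFinished', ({ path, size }) => {
  // each finished chunk file can now be uploaded as a single part
  console.log(`chunk ${path} (${size} bytes) ready`)
})
await streamProm.pipeline(source, splitter) // source: any Readable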

View File

@ -0,0 +1,21 @@
import crypto from 'node:crypto'
/**
 * Uid
 *
 * Utility for generating unique IDs.
 * Used as the default naming function for uploaded files.
 */
export const Uid = {
/**
 * Generate a random ID
 *
 * Uses the Node.js crypto module to produce 16 random bytes,
 * encoded as a hexadecimal string.
 *
 * @returns {string} A 32-character hexadecimal string
 */
rand(): string {
return crypto.randomBytes(16).toString('hex')
},
}

View File

@ -0,0 +1,72 @@
/**
 * Module: Upload
 * Defines the data model for an uploaded file.
 * Used to track metadata and state while a file is uploaded, e.g. in a web app or service.
 */
/**
 * Type: TUpload
 * Shape of the upload data: ID, size, offset, metadata and storage information.
 */
type TUpload = {
id: string // unique file identifier
size?: number // total file size, optional (may be deferred)
offset: number // current upload offset in bytes
metadata?: Record<string, string | null> // upload metadata, optional
storage?: { // storage information, optional
type: string // storage backend type
path: string // storage path
bucket?: string // bucket name, optional
}
creation_date?: string // creation date, optional
}
/**
 * Class: Upload
 * Wraps the upload data model and provides convenient accessors.
 * Uses a constructor to initialize the object and a getter for derived state.
 * Example:
 * const upload = new Upload({ id: '123', size: 1024, offset: 0 });
 * console.log(upload.sizeIsDeferred); // check whether the size is still unknown
 */
export class Upload {
id: TUpload['id'] // file ID
metadata: TUpload['metadata'] // file metadata
size: TUpload['size'] // file size
offset: TUpload['offset'] // upload offset
creation_date: TUpload['creation_date'] // creation date
storage: TUpload['storage'] // storage information
/**
 * Constructor
 * Initializes the Upload object; an ID is required.
 * Params:
 * - upload: a TUpload-shaped object
 * Throws when no ID is provided.
 */
constructor(upload: TUpload) {
// An ID is mandatory; fail fast when it is missing
if (!upload.id) {
throw new Error('[File] constructor must be given an ID')
}
// Initialize properties
this.id = upload.id
this.size = upload.size
this.offset = upload.offset
this.metadata = upload.metadata
this.storage = upload.storage
// Default the creation date to now when not provided
this.creation_date = upload.creation_date ?? new Date().toISOString()
}
/**
 * Method: sizeIsDeferred
 * Whether the total size of the upload is still unknown.
 * Returns true when size is undefined, false otherwise.
 */
get sizeIsDeferred(): boolean {
return this.size === undefined
}
}

View File

@ -0,0 +1,8 @@
export { DataStore } from './DataStore'
export * as Metadata from './Metadata'
export { StreamSplitter } from './StreamSplitter'
export { StreamLimiter } from './StreamLimiter'
export { Uid } from './Uid'
export { Upload } from './Upload'
export type { Locker, Lock, RequestRelease } from './Locker'
export type { CancellationContext } from './Context'

View File

@ -0,0 +1,138 @@
/**
 * TUS protocol header validators
 *
 * Implements the validation logic for the HTTP headers defined by the
 * TUS protocol, an open protocol for resumable file uploads.
 *
 * @version 1.0.0
 * @see https://tus.io/protocols/resumable-upload.html
 */
import { Metadata, TUS_VERSION, TUS_RESUMABLE } from "../utils"
/** A validator receives an optional string value and returns whether it is valid */
type validator = (value?: string) => boolean
/**
 * Map of TUS header names to their validators
 * Encodes the validation rules the TUS specification defines for each header
 */
export const validators = new Map<string, validator>([
[
'upload-offset',
/**
 * Upload-Offset header:
 * must be a non-negative integer in canonical decimal form
 */
(value) => {
const n = Number(value)
return Number.isInteger(n) && String(n) === value && n >= 0
},
],
[
'upload-length',
/**
 * Upload-Length header:
 * must be a non-negative integer in canonical decimal form
 */
(value) => {
const n = Number(value)
return Number.isInteger(n) && String(n) === value && n >= 0
},
],
[
'upload-defer-length',
/**
 * Upload-Defer-Length header:
 * indicates that the upload length will be declared later;
 * the value must be exactly '1'
 */
(value) => value === '1',
],
[
'upload-metadata',
/**
 * Upload-Metadata header:
 * one or more comma-separated key-value pairs,
 * with key and value separated by a space;
 * keys must be unique ASCII without spaces or commas,
 * and values must be Base64 encoded
 */
(value) => {
try {
Metadata.parse(value)
return true
} catch {
return false
}
},
],
[
'x-forwarded-proto',
/**
 * X-Forwarded-Proto header:
 * must be either http or https
 */
(value) => {
if (value === 'http' || value === 'https') {
return true
}
return false
},
],
[
'tus-version',
/**
 * Tus-Version header:
 * must be one of the protocol versions supported by the server
 */
(value) => {
return TUS_VERSION.includes(value as any)
},
],
[
'tus-resumable',
/**
 * Tus-Resumable header:
 * required on every request and response except OPTIONS,
 * and must match the protocol version in use
 */
(value) => value === TUS_RESUMABLE,
],
['content-type', (value) => value === 'application/offset+octet-stream'],
[
'upload-concat',
/**
 * Upload-Concat header:
 * either the literal 'partial',
 * or a value starting with 'final;' followed by a list of upload URLs
 */
(value) => {
if (!value) return false
const valid_partial = value === 'partial'
const valid_final = value.startsWith('final;')
return valid_partial || valid_final
},
],
])
/**
 * Check whether an HTTP header value conforms to the TUS specification
 * @param name The header name
 * @param value The header value
 * @returns true when valid (headers without a validator are accepted)
 */
export function validateHeader(name: string, value?: string): boolean {
const lowercaseName = name.toLowerCase()
if (!validators.has(lowercaseName)) {
return true
}
return validators.get(lowercaseName)!(value)
}
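For example, per the rules above:

validateHeader('upload-offset', '42') // true: non-negative integer
validateHeader('upload-offset', '-1') // false: negative
validateHeader('tus-resumable', '1.0.0') // true: matches TUS_RESUMABLE
validateHeader('content-type', 'text/plain') // false: must be application/offset+octet-stream
validateHeader('x-custom-header', 'anything') // true: headers without a validator pass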

View File

@ -0,0 +1,40 @@
{
"compilerOptions": {
"target": "es2022",
"module": "esnext",
"lib": [
"DOM",
"es2022"
],
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"moduleResolution": "node",
"removeComments": true,
"skipLibCheck": true,
"strict": true,
"isolatedModules": true,
"esModuleInterop": true,
"noUnusedLocals": false,
"noUnusedParameters": false,
"noImplicitReturns": false,
"noFallthroughCasesInSwitch": false,
"noUncheckedIndexedAccess": false,
"noImplicitOverride": false,
"noPropertyAccessFromIndexSignature": false,
"emitDeclarationOnly": true,
"outDir": "dist",
"incremental": true,
"tsBuildInfoFile": "./dist/tsconfig.tsbuildinfo"
},
"include": [
"src"
],
"exclude": [
"node_modules",
"dist",
"**/*.test.ts",
"**/*.spec.ts",
"**/__tests__"
]
}

View File

@ -0,0 +1,10 @@
import { defineConfig } from 'tsup';
export default defineConfig({
entry: ['src/index.ts'],
format: ['cjs', 'esm'],
splitting: false,
sourcemap: true,
clean: false,
dts: true
});

File diff suppressed because it is too large

View File

@ -1,67 +1,48 @@
{
"$schema": "https://turbo.build/schema.json",
"globalDependencies": [
"**/.env.*local"
],
"tasks": {
"dev": {
"dependsOn": ["^db:generate"],
"cache": false,
"persistent": true
},
"build": {
"dependsOn": ["^build", "^db:generate"],
"inputs": [
"$TURBO_DEFAULT$",
".env*"
],
"outputs": [
"dist/**",
".next/**",
"!.next/cache/**"
]
},
"lint": {
"dependsOn": [
"^lint"
]
},
"check-types": {
"dependsOn": [
"^check-types"
]
},
"db:generate": {
"cache": false
},
"db:migrate": {
"cache": false,
"persistent": true
},
"db:deploy": {
"cache": false
},
"db:push": {
"cache": false
},
"db:seed": {
"cache": false
},
"generate": {
"dependsOn": [
"^generate"
],
"cache": false
},
"test": {
"outputs": [
"coverage/**"
]
},
"test:e2e": {
"outputs": [
"coverage-e2e/**"
]
}
}
}
"$schema": "https://turbo.build/schema.json",
"globalDependencies": ["**/.env.*local"],
"tasks": {
"dev": {
"dependsOn": ["^db:generate"],
"cache": false,
"persistent": true
},
"build": {
"dependsOn": ["^build", "^db:generate"],
"inputs": ["$TURBO_DEFAULT$", ".env*"],
"outputs": ["dist/**", ".next/**", "!.next/cache/**"]
},
"lint": {
"dependsOn": ["^lint"]
},
"check-types": {
"dependsOn": ["^check-types"]
},
"db:generate": {
"cache": false
},
"db:migrate": {
"cache": false,
"persistent": true
},
"db:deploy": {
"cache": false
},
"db:push": {
"cache": false
},
"db:seed": {
"cache": false
},
"generate": {
"dependsOn": ["^generate"],
"cache": false
},
"test": {
"outputs": ["coverage/**"]
},
"test:e2e": {
"outputs": ["coverage-e2e/**"]
}
}
}