⚗️ Experimental new file upload API
@@ -50,7 +50,6 @@ public class DeveloperController(
     public async Task<ActionResult<List<Developer>>> ListJoinedDevelopers()
     {
         if (HttpContext.Items["CurrentUser"] is not Account currentUser) return Unauthorized();
-        var accountId = Guid.Parse(currentUser.Id);

         var pubResponse = await ps.ListPublishersAsync(new ListPublishersRequest { AccountId = currentUser.Id });
         var pubIds = pubResponse.Publishers.Select(p => p.Id).Select(Guid.Parse).ToList();
@@ -22,6 +22,7 @@
       <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
     </PackageReference>
     <PackageReference Include="Minio" Version="6.0.5" />
+    <PackageReference Include="Nanoid" Version="3.1.0" />
     <PackageReference Include="Nerdbank.GitVersioning" Version="3.7.115">
       <PrivateAssets>all</PrivateAssets>
       <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
DysonNetwork.Drive/Storage/FileUploadController.cs (new file, 266 lines)
@@ -0,0 +1,266 @@
using System.Text.Json;
using DysonNetwork.Drive.Billing;
using DysonNetwork.Drive.Storage.Model;
using DysonNetwork.Shared.Auth;
using DysonNetwork.Shared.Proto;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Mvc;
using Microsoft.EntityFrameworkCore;
using NanoidDotNet;

namespace DysonNetwork.Drive.Storage;

[ApiController]
[Route("/api/files/upload")]
[Authorize]
public class FileUploadController(
    IConfiguration configuration,
    FileService fileService,
    AppDatabase db,
    PermissionService.PermissionServiceClient permission,
    QuotaService quotaService
)
    : ControllerBase
{
    private readonly string _tempPath =
        Path.Combine(configuration.GetValue<string>("Storage:Uploads") ?? Path.GetTempPath(), "multipart-uploads");

    private const long DefaultChunkSize = 1024 * 1024 * 5; // 5MB

    [HttpPost("create")]
    public async Task<IActionResult> CreateUploadTask([FromBody] CreateUploadTaskRequest request)
    {
        if (HttpContext.Items["CurrentUser"] is not Account currentUser) return Unauthorized();

        if (!currentUser.IsSuperuser)
        {
            var allowed = await permission.HasPermissionAsync(new HasPermissionRequest
                { Actor = $"user:{currentUser.Id}", Area = "global", Key = "files.create" });
            if (!allowed.HasPermission)
            {
                return Forbid();
            }
        }

        if (!Guid.TryParse(request.PoolId, out var poolGuid))
        {
            return BadRequest("Invalid file pool id");
        }

        var pool = await fileService.GetPoolAsync(poolGuid);
        if (pool is null)
        {
            return BadRequest("Pool not found");
        }

        if (pool.PolicyConfig.RequirePrivilege > 0)
        {
            if (currentUser.PerkSubscription is null)
            {
return new ObjectResult("You need to have join the Stellar Program to use this pool")
|
||||
{ StatusCode = 403 };
|
||||
            }

            var privilege =
                PerkSubscriptionPrivilege.GetPrivilegeFromIdentifier(currentUser.PerkSubscription.Identifier);
            if (privilege < pool.PolicyConfig.RequirePrivilege)
            {
                return new ObjectResult(
                    $"You need Stellar Program tier {pool.PolicyConfig.RequirePrivilege} to use this pool; you are tier {privilege}")
                {
                    StatusCode = 403
                };
            }
        }

        if (!string.IsNullOrEmpty(request.BundleId) && !Guid.TryParse(request.BundleId, out _))
        {
            return BadRequest("Invalid file bundle id");
        }

        var policy = pool.PolicyConfig;
        if (!policy.AllowEncryption && !string.IsNullOrEmpty(request.EncryptPassword))
        {
            return new ObjectResult("File encryption is not allowed in this pool") { StatusCode = 403 };
        }

        if (policy.AcceptTypes is { Count: > 0 })
        {
            if (string.IsNullOrEmpty(request.ContentType))
            {
                return BadRequest("Content type is required by the pool's policy");
            }

            var foundMatch = policy.AcceptTypes.Any(acceptType =>
            {
                if (acceptType.EndsWith("/*", StringComparison.OrdinalIgnoreCase))
                {
                    var type = acceptType[..^2];
                    return request.ContentType.StartsWith($"{type}/", StringComparison.OrdinalIgnoreCase);
                }

                return acceptType.Equals(request.ContentType, StringComparison.OrdinalIgnoreCase);
            });

            if (!foundMatch)
            {
                return new ObjectResult($"Content type {request.ContentType} is not allowed by the pool's policy")
                    { StatusCode = 403 };
            }
        }

        if (policy.MaxFileSize is not null && request.FileSize > policy.MaxFileSize)
        {
            return new ObjectResult(
                $"File size {request.FileSize} is larger than the pool's maximum file size {policy.MaxFileSize}")
            {
                StatusCode = 403
            };
        }

        var (ok, billableUnit, quota) = await quotaService.IsFileAcceptable(
            Guid.Parse(currentUser.Id),
            pool.BillingConfig.CostMultiplier ?? 1.0,
            request.FileSize
        );
        if (!ok)
        {
            return new ObjectResult($"File size {billableUnit} MiB exceeds the user's quota of {quota} MiB")
                { StatusCode = 403 };
        }

        if (!Directory.Exists(_tempPath))
        {
            Directory.CreateDirectory(_tempPath);
        }

        // Check if a file with the same hash already exists
        var existingFile = await db.Files.FirstOrDefaultAsync(f => f.Hash == request.Hash);
        if (existingFile != null)
        {
            return Ok(new CreateUploadTaskResponse
            {
                FileExists = true,
                File = existingFile
            });
        }

        var taskId = await Nanoid.GenerateAsync();
        var taskPath = Path.Combine(_tempPath, taskId);
        Directory.CreateDirectory(taskPath);

        var chunkSize = request.ChunkSize ?? DefaultChunkSize;
        var chunksCount = (int)Math.Ceiling((double)request.FileSize / chunkSize);

        var task = new UploadTask
        {
            TaskId = taskId,
            FileName = request.FileName,
            FileSize = request.FileSize,
            ContentType = request.ContentType,
            ChunkSize = chunkSize,
            ChunksCount = chunksCount,
            PoolId = request.PoolId,
            BundleId = request.BundleId,
            EncryptPassword = request.EncryptPassword,
            ExpiredAt = request.ExpiredAt,
            Hash = request.Hash,
        };

        await System.IO.File.WriteAllTextAsync(Path.Combine(taskPath, "task.json"), JsonSerializer.Serialize(task));

        return Ok(new CreateUploadTaskResponse
        {
            FileExists = false,
            TaskId = taskId,
            ChunkSize = chunkSize,
            ChunksCount = chunksCount
        });
    }

    [HttpPost("chunk/{taskId}/{chunkIndex}")]
    [RequestSizeLimit(DefaultChunkSize + 1024 * 1024)] // 6MB to be safe
    [RequestFormLimits(MultipartBodyLengthLimit = DefaultChunkSize + 1024 * 1024)]
    public async Task<IActionResult> UploadChunk(string taskId, int chunkIndex, [FromForm] IFormFile chunk)
    {
        var taskPath = Path.Combine(_tempPath, taskId);
        if (!Directory.Exists(taskPath))
        {
            return NotFound("Upload task not found.");
        }

        var chunkPath = Path.Combine(taskPath, $"{chunkIndex}.chunk");
        await using var stream = new FileStream(chunkPath, FileMode.Create);
        await chunk.CopyToAsync(stream);

        return Ok();
    }

    [HttpPost("complete/{taskId}")]
    public async Task<IActionResult> CompleteUpload(string taskId)
    {
        var taskPath = Path.Combine(_tempPath, taskId);
        if (!Directory.Exists(taskPath))
        {
            return NotFound("Upload task not found.");
        }

        var taskJsonPath = Path.Combine(taskPath, "task.json");
        if (!System.IO.File.Exists(taskJsonPath))
        {
            return NotFound("Upload task metadata not found.");
        }

        var task = JsonSerializer.Deserialize<UploadTask>(await System.IO.File.ReadAllTextAsync(taskJsonPath));
        if (task == null)
        {
            return BadRequest("Invalid task metadata.");
        }

        var mergedFilePath = Path.Combine(_tempPath, taskId + ".tmp");
        await using (var mergedStream = new FileStream(mergedFilePath, FileMode.Create))
        {
            for (var i = 0; i < task.ChunksCount; i++)
            {
                var chunkPath = Path.Combine(taskPath, $"{i}.chunk");
                if (!System.IO.File.Exists(chunkPath))
                {
                    // Clean up partially uploaded file
                    mergedStream.Close();
                    System.IO.File.Delete(mergedFilePath);
                    Directory.Delete(taskPath, true);
                    return BadRequest($"Chunk {i} is missing.");
                }

                await using var chunkStream = new FileStream(chunkPath, FileMode.Open);
                await chunkStream.CopyToAsync(mergedStream);
            }
        }

        if (HttpContext.Items["CurrentUser"] is not Account currentUser) return Unauthorized();

        var fileId = await Nanoid.GenerateAsync();

        CloudFile cloudFile;
        await using (var fileStream =
                     new FileStream(mergedFilePath, FileMode.Open, FileAccess.Read, FileShare.Read))
        {
            cloudFile = await fileService.ProcessNewFileAsync(
                currentUser,
                fileId,
                task.PoolId,
                task.BundleId,
                fileStream,
                task.FileName,
                task.ContentType,
                task.EncryptPassword,
                task.ExpiredAt
            );
        }

        // Clean up only after the merged file's stream has been disposed,
        // otherwise deleting a still-open file can throw
        Directory.Delete(taskPath, true);
        System.IO.File.Delete(mergedFilePath);

        return Ok(cloudFile);
    }
}
DysonNetwork.Drive/Storage/Model/FileUploadModels.cs (new file, 42 lines)
@@ -0,0 +1,42 @@
using DysonNetwork.Drive.Storage;
using NodaTime;

namespace DysonNetwork.Drive.Storage.Model
{
    public class CreateUploadTaskRequest
    {
        public string Hash { get; set; } = null!;
        public string FileName { get; set; } = null!;
        public long FileSize { get; set; }
        public string ContentType { get; set; } = null!;
        public string PoolId { get; set; } = null!;
        public string? BundleId { get; set; }
        public string? EncryptPassword { get; set; }
        public Instant? ExpiredAt { get; set; }
        public long? ChunkSize { get; set; }
    }

    public class CreateUploadTaskResponse
    {
        public bool FileExists { get; set; }
        public CloudFile? File { get; set; }
        public string? TaskId { get; set; }
        public long? ChunkSize { get; set; }
        public int? ChunksCount { get; set; }
    }

    internal class UploadTask
    {
        public string TaskId { get; set; } = null!;
        public string FileName { get; set; } = null!;
        public long FileSize { get; set; }
        public string ContentType { get; set; } = null!;
        public long ChunkSize { get; set; }
        public int ChunksCount { get; set; }
        public string PoolId { get; set; } = null!;
        public string? BundleId { get; set; }
        public string? EncryptPassword { get; set; }
        public Instant? ExpiredAt { get; set; }
        public string Hash { get; set; } = null!;
    }
}
DysonNetwork.Drive/Storage/README.md (new file, 94 lines)
@@ -0,0 +1,94 @@
# Multi-part File Upload API

This document outlines the process for uploading large files in chunks using the multi-part upload API.

## 1. Create an Upload Task

To begin a file upload, you first need to create an upload task. This is done by sending a `POST` request to the `/api/files/upload/create` endpoint.

**Endpoint:** `POST /api/files/upload/create`

**Request Body:**

```json
{
  "hash": "string (file hash, e.g., MD5 or SHA256)",
  "file_name": "string",
  "file_size": "long (in bytes)",
  "content_type": "string (e.g., 'image/jpeg')",
  "pool_id": "string (GUID)",
  "bundle_id": "string (GUID, optional)",
  "encrypt_password": "string (optional)",
  "expired_at": "string (ISO 8601 format, optional)",
  "chunk_size": "long (in bytes, optional, defaults to 5MB)"
}
```

**Response:**

If a file with the same hash already exists, the server will return a `200 OK` with the following body:

```json
{
  "file_exists": true,
  "file": { ... (CloudFile object in snake_case) ... }
}
```

If the file does not exist, the server will return a `200 OK` with a task ID and chunk information:

```json
{
  "file_exists": false,
  "task_id": "string",
  "chunk_size": "long",
  "chunks_count": "int"
}
```

You will need the `task_id`, `chunk_size`, and `chunks_count` for the next steps.
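As a rough illustration, this is what the create step can look like from a C# client (.NET 7+). The helper name, the hard-coded `application/octet-stream` content type, and the choice of SHA256 are assumptions for the sketch, not part of the API; authentication headers are also omitted:

```csharp
using System.Net.Http.Json;
using System.Security.Cryptography;
using System.Text.Json;

// Hypothetical helper: hash a local file and create an upload task for it.
static async Task<JsonElement> CreateUploadTaskAsync(HttpClient http, string filePath, string poolId)
{
    // Hash the whole file first; the server uses the hash for deduplication.
    await using var fs = File.OpenRead(filePath);
    var hash = Convert.ToHexString(await SHA256.HashDataAsync(fs)).ToLowerInvariant();

    var response = await http.PostAsJsonAsync("/api/files/upload/create", new
    {
        hash,
        file_name = Path.GetFileName(filePath),
        file_size = new FileInfo(filePath).Length,
        content_type = "application/octet-stream", // placeholder; send the real MIME type
        pool_id = poolId                           // must be a valid pool GUID
    });
    response.EnsureSuccessStatusCode();

    // Either { "file_exists": true, "file": ... } or the task info.
    return await response.Content.ReadFromJsonAsync<JsonElement>();
}
```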
## 2. Upload File Chunks

Once you have a `task_id`, you can start uploading the file in chunks. Each chunk is sent as a `POST` request with `multipart/form-data`.

**Endpoint:** `POST /api/files/upload/chunk/{taskId}/{chunkIndex}`

- `taskId`: The ID of the upload task from the previous step.
- `chunkIndex`: The 0-based index of the chunk you are uploading.

**Request Body:**

The body of the request should be `multipart/form-data` with a single form field named `chunk` containing the binary data for that chunk.

The size of each chunk should be equal to the `chunk_size` returned in the "Create Upload Task" step, except for the last chunk, which may be smaller.

**Response:**

A successful chunk upload will return a `200 OK` with an empty body.

You should upload all chunks from `0` to `chunks_count - 1`.
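A matching sketch of the chunk loop, under the same assumptions as above. The form field must be named `chunk` to match the endpoint, and `ReadAtLeastAsync` (available since .NET 7) handles the shorter final chunk; retries and error handling are left out:

```csharp
using System.Net.Http.Headers;

// Hypothetical helper: slice the file into chunk_size pieces and POST each one.
static async Task UploadChunksAsync(
    HttpClient http, string filePath, string taskId, long chunkSize, int chunksCount)
{
    await using var fs = File.OpenRead(filePath);
    var buffer = new byte[chunkSize];

    for (var i = 0; i < chunksCount; i++)
    {
        // The last chunk may be shorter than chunkSize.
        var read = await fs.ReadAtLeastAsync(buffer, buffer.Length, throwOnEndOfStream: false);

        using var form = new MultipartFormDataContent();
        var part = new ByteArrayContent(buffer, 0, read);
        part.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
        form.Add(part, "chunk", "chunk.bin"); // field name "chunk" is required

        var response = await http.PostAsync($"/api/files/upload/chunk/{taskId}/{i}", form);
        response.EnsureSuccessStatusCode();
    }
}
```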
## 3. Complete the Upload

After all chunks have been successfully uploaded, you must send a final request to complete the upload process. This will merge all the chunks into a single file and process it.

**Endpoint:** `POST /api/files/upload/complete/{taskId}`

- `taskId`: The ID of the upload task.

**Request Body:**

The request body should be empty.

**Response:**

A successful request will return a `200 OK` with the `CloudFile` object for the newly uploaded file.

```json
{
  ... (CloudFile object) ...
}
```

If any chunks are missing or an error occurs during the merge process, the server will return a `400 Bad Request` with an error message.
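To round out the client sketch, the completion call can be as small as this; the response is read back as a raw JSON string here since the `CloudFile` shape is defined server-side:

```csharp
// Hypothetical helper: merge the chunks server-side and return the CloudFile JSON.
static async Task<string> CompleteUploadAsync(HttpClient http, string taskId)
{
    var response = await http.PostAsync($"/api/files/upload/complete/{taskId}", content: null);
    response.EnsureSuccessStatusCode();
    return await response.Content.ReadAsStringAsync();
}
```

Together, the three helpers cover the whole flow: create the task, upload each chunk in order, then complete.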
@@ -40,6 +40,7 @@
     "StorePath": "Uploads"
   },
   "Storage": {
+    "Uploads": "Uploads",
     "PreferredRemote": "2adceae3-981a-4564-9b8d-5d71a211c873",
     "Remote": [
       {