// Copyright Epic Games, Inc. All Rights Reserved. using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.ComponentModel.DataAnnotations; using System.Globalization; using System.IO; using System.Linq; using System.Net; using System.Net.Mime; using System.Text; using System.Text.Json.Serialization; using System.Threading; using System.Threading.Tasks; using EpicGames.AspNet; using EpicGames.Core; using EpicGames.Horde.Storage; using EpicGames.Serialization; using JetBrains.Annotations; using Jupiter.Common; using Jupiter.Common.Implementation; using Jupiter.Implementation; using Jupiter.Implementation.Blob; using Jupiter.Utils; using Microsoft.AspNetCore.Authorization; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Http.Extensions; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Microsoft.Extensions.Primitives; using OpenTelemetry.Trace; using ContentHash = Jupiter.Implementation.ContentHash; using ContentId = Jupiter.Implementation.ContentId; namespace Jupiter.Controllers { using IDiagnosticContext = Serilog.IDiagnosticContext; [ApiController] [FormatFilter] [Produces(MediaTypeNames.Application.Json, MediaTypeNames.Application.Octet, CustomMediaTypeNames.UnrealCompactBinary)] [Route("api/v1/refs")] [Authorize] public class ReferencesController : ControllerBase { private readonly IDiagnosticContext _diagnosticContext; private readonly FormatResolver _formatResolver; private readonly BufferedPayloadFactory _bufferedPayloadFactory; private readonly IReferenceResolver _referenceResolver; private readonly NginxRedirectHelper _nginxRedirectHelper; private readonly INamespacePolicyResolver _namespacePolicyResolver; private readonly IRequestHelper _requestHelper; private readonly Tracer _tracer; private readonly ILogger _logger; private readonly IOptionsMonitor _settings; private readonly IRefService _refService; private readonly IBlobService _blobStore; private readonly IBlobIndex _blobIndex; private readonly IPeerStatusService _peerStatusService; public ReferencesController(IRefService refService, IBlobService blobStore, IBlobIndex blobIndex, IPeerStatusService peerStatusService, IDiagnosticContext diagnosticContext, FormatResolver formatResolver, BufferedPayloadFactory bufferedPayloadFactory, IReferenceResolver referenceResolver, NginxRedirectHelper nginxRedirectHelper, INamespacePolicyResolver namespacePolicyResolver, IRequestHelper requestHelper, Tracer tracer, ILogger logger, IOptionsMonitor settings) { _refService = refService; _blobStore = blobStore; _blobIndex = blobIndex; _peerStatusService = peerStatusService; _diagnosticContext = diagnosticContext; _formatResolver = formatResolver; _bufferedPayloadFactory = bufferedPayloadFactory; _referenceResolver = referenceResolver; _nginxRedirectHelper = nginxRedirectHelper; _namespacePolicyResolver = namespacePolicyResolver; _requestHelper = requestHelper; _tracer = tracer; _logger = logger; _settings = settings; } /// /// Returns all the known namespace the token has access to /// /// [HttpGet("")] [ProducesDefaultResponseType] [ProducesResponseType(type: typeof(ProblemDetails), 400)] public async Task GetNamespacesAsync() { NamespaceId[] namespaces = await _refService.GetNamespacesAsync(HttpContext.RequestAborted).ToArrayAsync(HttpContext.RequestAborted); // filter namespaces down to only the namespaces the user has access to List namespacesWithAccess = new(); foreach (NamespaceId 
ns in namespaces) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult == null) { namespacesWithAccess.Add(ns); } } if (!namespacesWithAccess.Any()) { return new ForbidResult(); } return Ok(new GetNamespacesResponse(namespacesWithAccess.ToArray())); } /// /// Returns ref in a bucket /// /// [HttpGet("{ns}/{bucket}")] [ProducesDefaultResponseType] [ProducesResponseType(type: typeof(ProblemDetails), 400)] public async Task EnumerateBucketAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket ) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.EnumerateBucket }); if (accessResult != null) { return accessResult; } if (_settings.CurrentValue.RequirePrivatePortForEnumeration && _requestHelper.IsPublicPort(Request.HttpContext)) { return Forbid(); } RefId[] refIds = await _refService.GetRecordsInBucketAsync(ns, bucket).ToArrayAsync(); return Ok(new EnumerateBucketResponse(refIds)); } /// /// Returns a refs key /// /// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance). Example: `uc4.ddc` /// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily. Example: `terrainTexture` /// The unique name of this particular key. `iAmAVeryValidKey` /// Optional specifier to set which output format is used json/raw/cb [HttpGet("{ns}/{bucket}/{key}.{format?}", Order = 500)] [Produces(MediaTypeNames.Application.Json, MediaTypeNames.Application.Octet, CustomMediaTypeNames.UnrealCompactBinary, CustomMediaTypeNames.JupiterInlinedPayload, CustomMediaTypeNames.UnrealCompactBinaryPackage)] public async Task GetAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key, [FromRoute] string? format = null) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult != null) { return accessResult; } try { Tracer.CurrentSpan.SetAttribute("bucket", bucket.ToString()); Tracer.CurrentSpan.SetAttribute("namespace", ns.ToString()); (RefRecord objectRecord, BlobContents? maybeBlob) = await _refService.GetAsync(ns, bucket, key, Array.Empty()); if (maybeBlob == null) { throw new InvalidOperationException($"Blob was null when attempting to fetch {ns} {bucket} {key}"); } await using BlobContents blob = maybeBlob; if (!objectRecord.IsFinalized) { // we do not consider un-finalized objects as valid return BadRequest(new ProblemDetails { Title = $"Object {objectRecord.Bucket} {objectRecord.Name} is not finalized." }); } Response.Headers[CommonHeaders.HashHeaderName] = objectRecord.BlobIdentifier.ToString(); Response.Headers[CommonHeaders.LastAccessHeaderName] = objectRecord.LastAccess.ToString(CultureInfo.InvariantCulture); async Task WriteBody(BlobContents blobContents, string contentType) { IServerTiming? serverTiming = Request.HttpContext.RequestServices.GetService(); using ServerTimingMetricScoped? 
serverTimingScope = serverTiming?.CreateServerTimingMetricScope("body.write", "Time spent writing body"); long contentLength = blobContents.Length; using TelemetrySpan scope = _tracer.StartActiveSpan("body.write").SetAttribute("operation.name", "body.write"); scope.SetAttribute("content-length", contentLength); const int BufferSize = 64 * 1024; Stream outputStream = Response.Body; Response.ContentLength = contentLength; Response.ContentType = contentType; Response.StatusCode = StatusCodes.Status200OK; try { await StreamCopyOperation.CopyToAsync(blobContents.Stream, outputStream, count: null, bufferSize: BufferSize, cancel: Response.HttpContext.RequestAborted); } catch (OperationCanceledException) { // do not raise exceptions for cancelled writes // as we have already started writing a response we can not change the status code // so we just drop a warning and proceed _logger.LogWarning("The operation was canceled while writing the body"); } } string responseType = _formatResolver.GetResponseType(Request, format, CustomMediaTypeNames.UnrealCompactBinary); Tracer.CurrentSpan.SetAttribute("response-type", responseType); switch (responseType) { case CustomMediaTypeNames.UnrealCompactBinary: { if (_nginxRedirectHelper.CanRedirect(Request, blob)) { return _nginxRedirectHelper.CreateActionResult(blob, responseType); } // for compact binary we can just serialize our internal object await WriteBody(blob, CustomMediaTypeNames.UnrealCompactBinary); break; } case MediaTypeNames.Application.Octet: { byte[] blobMemory = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject cb = new CbObject(blobMemory); (int, CbField?) CountFields(CbObject o) { int count = 0; CbField? foundField = null; o.IterateAttachments(field => { ++count; if (field.IsBinaryAttachment()) { foundField = field; } }); return (count, foundField); } // breaking lambda call into private method to workaround incorrect triggering of CA1508 - https://github.com/dotnet/roslyn-analyzers/issues/5254 (int countOfAttachmentFields, CbField? 
binaryAttachmentField) = CountFields(cb); if (countOfAttachmentFields == 1 && binaryAttachmentField != null) {
// there is a single attachment field and that is of the binary attachment type, fetch that attachment and return it instead of the compact binary
// this is so that we match the upload that happened as an octet-stream, which generates a small cb object with a single attachment
IoHash hash = binaryAttachmentField.AsBinaryAttachment(); BlobContents referencedBlobContents = await _blobStore.GetObjectAsync(ns, BlobId.FromIoHash(hash), bucketHint: bucket); if (_nginxRedirectHelper.CanRedirect(Request, referencedBlobContents)) { return _nginxRedirectHelper.CreateActionResult(referencedBlobContents, MediaTypeNames.Application.Octet); } await WriteBody(referencedBlobContents, MediaTypeNames.Application.Octet); break; }
// this doesn't look like the generated compact binary so we just return the payload
await using BlobContents contents = new(blobMemory); await WriteBody(contents, MediaTypeNames.Application.Octet); break; } case MediaTypeNames.Application.Json: { byte[] blobMemory; { using TelemetrySpan scope = _tracer.StartActiveSpan("json.readblob").SetAttribute("operation.name", "json.readblob"); blobMemory = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); } CbObject cb = new CbObject(blobMemory); string s = cb.ToJson(); await using BlobContents contents = new BlobContents(Encoding.UTF8.GetBytes(s)); await WriteBody(contents, MediaTypeNames.Application.Json); break; } case CustomMediaTypeNames.UnrealCompactBinaryPackage: { using TelemetrySpan packageScope = _tracer.StartActiveSpan("cbpackage.fetch").SetAttribute("operation.name", "cbpackage.fetch"); byte[] blobMemory = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject cb = new CbObject(blobMemory); IAsyncEnumerable attachments = _referenceResolver.GetAttachmentsAsync(ns, cb, HttpContext.RequestAborted); using CbPackageBuilder writer = new CbPackageBuilder(); writer.AddAttachment(objectRecord.BlobIdentifier.AsIoHash(), CbPackageAttachmentFlags.IsObject, blobMemory); await Parallel.ForEachAsync(attachments, async (attachment, token) => { IoHash attachmentHash = attachment.AsIoHash(); CbPackageAttachmentFlags flags = 0; try { BlobContents attachmentContents; if (attachment is BlobAttachment blobAttachment) { BlobId referencedBlob = blobAttachment.Identifier; attachmentContents = await _blobStore.GetObjectAsync(ns, referencedBlob, bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); } else if (attachment is ObjectAttachment objectAttachment) { flags |= CbPackageAttachmentFlags.IsObject; BlobId referencedBlob = objectAttachment.Identifier; attachmentContents = await _blobStore.GetObjectAsync(ns, referencedBlob, bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); } else if (attachment is ContentIdAttachment contentIdAttachment) { ContentId contentId = contentIdAttachment.Identifier; (attachmentContents, string mime, BlobId? _) = await _blobStore.GetCompressedObjectAsync(ns, contentId, HttpContext.RequestServices, cancellationToken: HttpContext.RequestAborted); if (mime == CustomMediaTypeNames.UnrealCompressedBuffer) { flags |= CbPackageAttachmentFlags.IsCompressed; } else {
// this resolved to an uncompressed blob: the content id existed but the compressed blob didn't,
// so resetting flags to indicate this.
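// for reference, the attachment flag combinations this loop can produce are:
//   CbPackageAttachmentFlags.IsObject                  - object attachments
//   CbPackageAttachmentFlags.IsCompressed              - content ids that resolved to a compressed buffer
//   (no flags)                                         - plain, uncompressed binary blobs (the case right below)
//   CbPackageAttachmentFlags.IsError | IsObject        - attachments that failed to resolve (see the catch further down)
// a minimal client-side sketch for consuming such a package, assuming the same CbPackageReader API that
// PutPackageAsync uses later in this file ("responseBytes" is a hypothetical buffer holding the response body):
//   CbPackageReader reader = await CbPackageReader.CreateAsync(new MemoryStream(responseBytes));
//   await foreach ((CbPackageAttachmentEntry entry, byte[] payload) in reader.IterateAttachmentsAsync())
//   {
//       if (entry.Flags.HasFlag(CbPackageAttachmentFlags.IsError)) { /* payload is a compact-binary error object */ }
//   }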
flags = 0; } } else { throw new NotSupportedException($"Unknown attachment type {attachment.GetType()}"); } writer.AddAttachment(attachmentHash, flags, attachmentContents.Stream, (ulong)attachmentContents.Length); } catch (Exception e) { (CbObject errorObject, HttpStatusCode _) = ToErrorResult(e); writer.AddAttachment(attachmentHash, CbPackageAttachmentFlags.IsError | CbPackageAttachmentFlags.IsObject, errorObject.GetView().ToArray()); } }); byte[] packageBytes; { using TelemetrySpan _ = _tracer.StartActiveSpan("cbpackage.buffer").SetAttribute("operation.name", "cbpackage.buffer"); packageBytes = await writer.ToByteArrayAsync(); } await using BlobContents contents = new BlobContents(packageBytes); await WriteBody(contents, CustomMediaTypeNames.UnrealCompactBinaryPackage); break; } case CustomMediaTypeNames.JupiterInlinedPayload: { byte[] blobMemory = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject cb = new CbObject(blobMemory); static (int, int) CountFields(CbObject o) { int countOfBinaryAttachmentFields = 0; int countOfAttachmentFields = 0; o.IterateAttachments(field => { if (field.IsBinaryAttachment()) { ++countOfBinaryAttachmentFields; } if (field.IsAttachment()) { ++countOfAttachmentFields; } }); return (countOfBinaryAttachmentFields, countOfAttachmentFields); } // breaking lambda call into private method to workaround incorrect triggering of CA1508 - https://github.com/dotnet/roslyn-analyzers/issues/5254 (int countOfAttachmentFields, int countOfBinaryAttachmentFields) = CountFields(cb); // if the object consists of a single attachment field we return this attachment field instead if (countOfBinaryAttachmentFields == 1 && countOfAttachmentFields == 1) { // fetch the blob so we can resolve any content ids in it List referencedBlobs; try { IAsyncEnumerable referencedBlobsEnumerable = _referenceResolver.GetReferencedBlobsAsync(ns, cb); referencedBlobs = await referencedBlobsEnumerable.ToListAsync(); } catch (PartialReferenceResolveException) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} was missing some content ids" }); } catch (ReferenceIsMissingBlobsException) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} was missing some blobs" }); } if (referencedBlobs.Count == 1) { BlobId attachmentToSend = referencedBlobs.First(); try { BlobContents referencedBlobContents = await _blobStore.GetObjectAsync(ns, attachmentToSend, bucketHint: bucket); Response.Headers[CommonHeaders.InlinePayloadHash] = attachmentToSend.ToString(); if (_nginxRedirectHelper.CanRedirect(Request, referencedBlobContents)) { return _nginxRedirectHelper.CreateActionResult(referencedBlobContents, CustomMediaTypeNames.JupiterInlinedPayload); } await WriteBody(referencedBlobContents, CustomMediaTypeNames.JupiterInlinedPayload); } catch (BlobNotFoundException) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} was missing blob {attachmentToSend}" }); } catch (Exception ex) { Tracer.CurrentSpan.SetStatus(Status.Error); Tracer.CurrentSpan.RecordException(ex); _logger.LogError(ex, "Unknown exception encountered while writing body for jupiter inlined payload."); throw; } return new EmptyResult(); } else if (referencedBlobs.Count == 0) { return NotFound(new ProblemDetails { Title = $"Object {objectRecord.Bucket} {objectRecord.Name} did not resolve into any objects that we could find." 
}); } return BadRequest(new ProblemDetails { Title = $"Object {objectRecord.Bucket} {objectRecord.Name} contained a content id which resolved to more than 1 blob, unable to inline this object. Use compact object response instead." }); } else if (countOfBinaryAttachmentFields == 0 && countOfAttachmentFields == 0) {
// no attachments so we just return the compact object instead
await using BlobContents contents = new BlobContents(blobMemory); await WriteBody(contents, CustomMediaTypeNames.JupiterInlinedPayload); return new EmptyResult(); } return BadRequest(new ProblemDetails { Title = $"Object {objectRecord.Bucket} {objectRecord.Name} had more than 1 binary attachment field, unable to inline this object. Use compact object response instead." }); } default: throw new NotImplementedException($"Unknown expected response type {responseType}"); }
// this result is ignored as we write to the body explicitly
return new EmptyResult(); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (RefNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Bucket} {e.Key} did not exist" }); } catch (BlobNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Blob} in {e.Ns} not found" }); } }
///
/// Returns the metadata about a ref key
///
/// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance). Example: `uc4.ddc`
/// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily. Example: `terrainTexture`
/// The unique name of this particular key. `iAmAVeryValidKey`
/// The fields to include in the response, omit this to include everything.
[HttpGet("{ns}/{bucket}/{key}/metadata", Order = 500)] public async Task GetMetadataAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key, [FromQuery] string[] fields) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult != null) { return accessResult; } try { (RefRecord objectRecord, BlobContents? _) = await _refService.GetAsync(ns, bucket, key, fields); return Ok(new RefMetadataResponse(objectRecord)); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (RefNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Bucket} {e.Key} did not exist" }); } catch (BlobNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Blob} in {e.Ns} not found" }); } }
///
/// Returns the replicated state of this ref across all known cloud ddc regions
///
/// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance). Example: `uc4.ddc`
/// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily. Example: `terrainTexture`
/// The unique name of this particular key.
`iAmAVeryValidKey` [HttpGet("{ns}/{bucket}/{key}/replicationState", Order = 500)] public async Task GetReplicationStateAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.AdminAction }); if (accessResult != null) { return accessResult; } try { List blobs = await _refService.GetReferencedBlobsAsync(ns, bucket, key, ignoreMissingBlobs: true); Dictionary> blobStatePerRegion = new Dictionary>(); await Parallel.ForEachAsync(blobs, async (blobId, cancellationToken) => { Dictionary blobState = new Dictionary(); foreach (string region in _peerStatusService.GetRegions()) { bool exists = await _blobIndex.BlobExistsInRegionAsync(ns, blobId, region, CancellationToken.None); blobState.TryAdd(region, exists); } lock (blobStatePerRegion) { blobStatePerRegion[blobId.ToString()] = blobState; } }); return Ok(new RefReplicationStateResponse(ns, bucket, key, blobStatePerRegion)); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (RefNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Bucket} {e.Key} did not exist" }); } catch (BlobNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Blob} in {e.Ns} not found" }); } } /// /// Returns the blobs referenced by this ref /// /// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance). Example: `uc4.ddc` /// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily. Example: `terrainTexture` /// The unique name of this particular key. `iAmAVeryValidKey` [HttpGet("{ns}/{bucket}/{key}/references", Order = 500)] public async Task GetReferencedBlobsAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult != null) { return accessResult; } RefRecord refRecord; BlobContents? 
blob; try { (refRecord, blob) = await _refService.GetAsync(ns, bucket, key, fields: Array.Empty(), doLastAccessTracking: false, cancellationToken: HttpContext.RequestAborted); } catch (BlobNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Blob} not found" }); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (RefNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Bucket} {e.Key} did not exist" }); } if (blob == null) { throw new Exception("Failed to find blob contents for ref"); } byte[] blobContents = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); if (blobContents.Length == 0) { _logger.LogWarning("0 byte object found for {Id} {Namespace}", refRecord.BlobIdentifier, ns); } CbObject compactBinaryObject; try { compactBinaryObject = new CbObject(blobContents); } catch (IndexOutOfRangeException) { return Problem(title: $"{refRecord.BlobIdentifier} was not a proper compact binary object.", detail: "Index out of range"); } try { List references = await _referenceResolver.GetReferencedBlobsAsync(ns, compactBinaryObject).ToListAsync(); if (refRecord.InlinePayload == null) { // payload is not inlined, so it needs to be replicated references.Add(refRecord.BlobIdentifier); } return Ok(new ResolvedReferencesResult(references.ToArray())); } catch (PartialReferenceResolveException e) { return BadRequest(new ValidationProblemDetails { Title = $"Object {bucket} {key} did not exist", Detail = $"Following content ids are invalid: {string.Join(",", e.UnresolvedReferences)}" }); } catch (ReferenceIsMissingBlobsException e) { return BadRequest(new ValidationProblemDetails { Title = $"Object {bucket} {key} did not exist", Detail = $"Following blobs are missing: {string.Join(",", e.MissingBlobs)}" }); } } /// /// Checks if a object exists /// /// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance). Example: `uc4.ddc` /// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily. Example: `terrainTexture` /// The unique name of this particular key. `iAmAVeryValidKey` /// 200 if it existed, 400 otherwise [HttpHead("{ns}/{bucket}/{key}", Order = 500)] [ProducesResponseType(type: typeof(OkResult), 200)] [ProducesResponseType(type: typeof(ValidationProblemDetails), 400)] public async Task HeadAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult != null) { return accessResult; } try { (RefRecord record, BlobContents? blob) = await _refService.GetAsync(ns, bucket, key, new string[] { "blobIdentifier", "IsFinalized" }); Response.Headers[CommonHeaders.HashHeaderName] = record.BlobIdentifier.ToString(); if (!record.IsFinalized) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} is not finalized." 
}); } blob ??= await _blobStore.GetObjectAsync(ns, record.BlobIdentifier, bucketHint: bucket);
// we have to verify the blobs are available locally, as the record of the key is replicated ahead of the content
// TODO: Once we support inline replication this step is not needed as at least one region has this blob, just maybe not this current one
byte[] blobContents = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject compactBinaryObject = new CbObject(blobContents);
// the reference resolver will throw if any blob is missing, so no need to do anything other than process each reference
IAsyncEnumerable references = _referenceResolver.GetReferencedBlobsAsync(ns, compactBinaryObject); List? _ = await references.ToListAsync();
// we have to verify the blobs are available locally, as the record of the key is replicated ahead of the content
// TODO: Once we support inline replication this step is not needed as at least one region has this blob, just maybe not this current one
BlobId[] unknownBlobs = await _blobStore.FilterOutKnownBlobsAsync(ns, new BlobId[] { record.BlobIdentifier }, HttpContext.RequestAborted); if (unknownBlobs.Length != 0) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} had at least one missing blob." }); } } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (BlobNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Blob {e.Blob} in namespace {ns} did not exist" }); } catch (RefNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Object {e.Bucket} {e.Key} in namespace {e.Namespace} did not exist" }); } catch (PartialReferenceResolveException) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} was missing some content ids" }); } catch (ReferenceIsMissingBlobsException) { return NotFound(new ProblemDetails { Title = $"Object {bucket} {key} in namespace {ns} was missing some blobs" }); } return Ok(); } [HttpGet("{ns}/exists")] [ProducesDefaultResponseType] public async Task ExistsMultipleAsync( [FromRoute][Required] NamespaceId ns, [FromQuery][Required] List names) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (accessResult != null) { return accessResult; } ConcurrentBag<(BucketId, RefId)> missingObject = new(); List<(BucketId, RefId)> requestedNames = new List<(BucketId, RefId)>(); foreach (string name in names) { int separatorIndex = name.IndexOf(".", StringComparison.Ordinal); if (separatorIndex == -1) { return BadRequest(new ProblemDetails() { Title = $"Key {name} did not contain a '.' separator" }); } BucketId bucket = new BucketId(name.Substring(0, separatorIndex)); RefId key = new RefId(name.Substring(separatorIndex + 1)); requestedNames.Add((bucket, key)); } IEnumerable tasks = requestedNames.Select(async pair => { (BucketId bucket, RefId key) = pair; try { (RefRecord record, BlobContents?
blob) = await _refService.GetAsync(ns, bucket, key, new string[] { "blobIdentifier" }); blob ??= await _blobStore.GetObjectAsync(ns, record.BlobIdentifier, bucketHint: bucket); // we have to verify the blobs are available locally, as the record of the key is replicated a head of the content // TODO: Once we support inline replication this step is not needed as at least one region as this blob, just maybe not this current one byte[] blobContents = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject cb = new CbObject(blobContents); // the reference resolver will throw if any blob is missing, so no need to do anything other then process each reference IAsyncEnumerable references = _referenceResolver.GetReferencedBlobsAsync(ns, cb); List? _ = await references.ToListAsync(); } catch (RefNotFoundException) { missingObject.Add((bucket, key)); } catch (PartialReferenceResolveException) { missingObject.Add((bucket, key)); } catch (ReferenceIsMissingBlobsException) { missingObject.Add((bucket, key)); } catch (BlobNotFoundException) { missingObject.Add((bucket, key)); } }); await Task.WhenAll(tasks); return Ok(new ExistCheckMultipleRefsResponse(missingObject.ToList())); } [HttpPut("{ns}/{bucket}/{key}.{format?}", Order = 500)] [DisableRequestSizeLimit] public async Task PutObjectAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key, [FromHeader(Name = CommonHeaders.AllowOverwrite)] bool allowOverwrite = false) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.WriteObject }); if (accessResult != null) { return accessResult; } _diagnosticContext.Set("Content-Length", Request.ContentLength ?? -1); Tracer.CurrentSpan.SetAttribute("bucket", bucket.ToString()); Tracer.CurrentSpan.SetAttribute("namespace", ns.ToString()); CbObject payloadObject; BlobId blobHeader; try { using IBufferedPayload payload = await _bufferedPayloadFactory.CreateFromRequestAsync(Request, "put-blob-ref", HttpContext.RequestAborted); BlobId headerHash; if (Request.Headers.TryGetValue(CommonHeaders.HashHeaderName, out StringValues headers)) { if (!StringValues.IsNullOrEmpty(headers)) { headerHash = new BlobId(headers.ToString()); } else { return BadRequest(new ProblemDetails { Title = $"Header {CommonHeaders.HashHeaderName} was empty" }); } } else { return BadRequest(new ProblemDetails { Title = $"Missing expected header {CommonHeaders.HashHeaderName}" }); } blobHeader = headerHash; switch (Request.ContentType) { case MediaTypeNames.Application.Json: { // TODO: define a scheme for how a json object specifies references blobHeader = await _blobStore.PutObjectAsync(ns, payload, headerHash, bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); // TODO: convert the json object into a compact binary instead CbWriter writer = new CbWriter(); writer.BeginObject(); writer.WriteBinaryAttachmentValue(blobHeader.AsIoHash()); writer.EndObject(); byte[] blob = writer.ToByteArray(); payloadObject = new CbObject(blob); blobHeader = BlobId.FromBlob(blob); break; } case CustomMediaTypeNames.UnrealCompactBinary: { await using MemoryStream ms = new MemoryStream(); await using Stream payloadStream = payload.GetStream(); await payloadStream.CopyToAsync(ms); payloadObject = new CbObject(ms.ToArray()); break; } case MediaTypeNames.Application.Octet: { blobHeader = await _blobStore.PutObjectAsync(ns, payload, headerHash, bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); 
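// the raw payload has been stored as a blob above; the lines below wrap it in a small compact-binary
// object so that the ref record itself is always a CbObject. The generated wrapper is roughly
// equivalent to (illustration only):
//   {
//       "RawHash": <binary attachment referencing blobHeader>,
//       "RawSize": <payload.Length>
//   }
// the octet-stream GET path above unwraps this again when it finds a single binary attachment field.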
CbWriter writer = new CbWriter(); writer.BeginObject(); writer.WriteBinaryAttachment("RawHash", blobHeader.AsIoHash()); writer.WriteInteger("RawSize", payload.Length); writer.EndObject(); byte[] blob = writer.ToByteArray(); payloadObject = new CbObject(blob); blobHeader = BlobId.FromBlob(blob); break; } default: throw new Exception($"Unknown request type {Request.ContentType}, if submitting a blob please use {MediaTypeNames.Application.Octet}"); } } catch (HashMismatchException e) { return BadRequest(new ProblemDetails { Title = $"Incorrect hash, got hash \"{e.SuppliedHash}\" but hash of content was determined to be \"{e.ContentHash}\"" }); } catch (ClientSendSlowException e) { return Problem(e.Message, null, (int)HttpStatusCode.RequestTimeout); } try { { using TelemetrySpan scope = _tracer.StartActiveSpan("ref.put").SetAttribute("operation.name", "ref.put"); (ContentId[] missingReferences, BlobId[] missingBlobs) = await _refService.PutAsync(ns, bucket, key, blobHeader, payloadObject, allowOverwrite: allowOverwrite, cancellationToken: HttpContext.RequestAborted); List missingHashes = new List(missingReferences); missingHashes.AddRange(missingBlobs); ContentHash[] missingArray = missingHashes.ToArray(); scope.SetAttribute("NeedsCount", missingArray.Length); return Ok(new PutObjectResponse(missingArray)); } } catch (RefAlreadyExistsException e) { byte[]? payload = e.OldObject.InlinePayload; if (payload == null || payload.Length == 0) { using TelemetrySpan scope = _tracer.StartActiveSpan("blob.get").SetAttribute("operation.name", "blob.get"); try { await using BlobContents blobContents = await _blobStore.GetObjectAsync(ns, e.OldObject.BlobIdentifier, cancellationToken: HttpContext.RequestAborted); payload = await blobContents.Stream.ToByteArrayAsync(HttpContext.RequestAborted); } catch (BlobNotFoundException) { (CbObject errorObject, _) = ToErrorResult("Failed to find old ref during overwrite"); payload = errorObject.GetView().ToArray(); } } // if a ref exists we send back the existing ref, always as a compact binary as that is how it is stored Response.ContentType = CustomMediaTypeNames.UnrealCompactBinary; Response.StatusCode = (int)HttpStatusCode.Conflict; await Response.Body.WriteAsync(payload); return new EmptyResult(); } } [HttpPut("{ns}/{bucket}/{key}", Order = 300)] [DisableRequestSizeLimit] [RequiredContentType(CustomMediaTypeNames.UnrealCompactBinaryPackage)] public async Task PutPackageAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.WriteObject }); if (accessResult != null) { return accessResult; } _diagnosticContext.Set("Content-Length", Request.ContentLength ?? 
-1); Tracer.CurrentSpan.SetAttribute("bucket", bucket.ToString()); Tracer.CurrentSpan.SetAttribute("namespace", ns.ToString()); byte[] b = await RequestUtil.ReadRawBodyAsync(Request); CbPackageReader packageReader = await CbPackageReader.CreateAsync(new MemoryStream(b)); try { await foreach ((CbPackageAttachmentEntry entry, byte[] blob) in packageReader.IterateAttachmentsAsync()) { if (entry.Flags.HasFlag(CbPackageAttachmentFlags.IsError)) { return BadRequest(new ProblemDetails { Title = $"Package contained attachment with error {entry.AttachmentHash}\"" }); } if (entry.Flags.HasFlag(CbPackageAttachmentFlags.IsCompressed)) { #pragma warning disable CA2000 // Dispose objects before losing scope using MemoryBufferedPayload payload = new MemoryBufferedPayload(blob); #pragma warning restore CA2000 // Dispose objects before losing scope await _blobStore.PutCompressedObjectAsync(ns, payload, ContentId.FromIoHash(entry.AttachmentHash), HttpContext.RequestServices, HttpContext.RequestAborted); } else { await _blobStore.PutObjectAsync(ns, blob, BlobId.FromIoHash(entry.AttachmentHash), bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); } } } catch (HashMismatchException e) { return BadRequest(new ProblemDetails { Title = $"Incorrect hash, got hash \"{e.SuppliedHash}\" but hash of content was determined to be \"{e.ContentHash}\"" }); } CbObject rootObject = packageReader.RootObject; BlobId rootObjectHash = BlobId.FromIoHash(packageReader.RootHash); (ContentId[] missingReferences, BlobId[] missingBlobs) = await _refService.PutAsync(ns, bucket, key, rootObjectHash, rootObject, cancellationToken: HttpContext.RequestAborted); List missingHashes = new List(missingReferences); missingHashes.AddRange(missingBlobs); return Ok(new PutObjectResponse(missingHashes.ToArray())); } [HttpPost("{ns}/{bucket}/{key}/finalize/{hash}.{format?}")] public async Task FinalizeObjectAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key, [FromRoute][Required] BlobId hash) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.WriteObject }); if (accessResult != null) { return accessResult; } try { Tracer.CurrentSpan.SetAttribute("bucket", bucket.ToString()); Tracer.CurrentSpan.SetAttribute("namespace", ns.ToString()); (ContentId[] missingReferences, BlobId[] missingBlobs) = await _refService.FinalizeAsync(ns, bucket, key, hash, cancellationToken: HttpContext.RequestAborted); List missingHashes = new List(missingReferences); missingHashes.AddRange(missingBlobs); return Ok(new PutObjectResponse(missingHashes.ToArray())); } catch (ObjectHashMismatchException e) { return BadRequest(e.Message); } catch (RefNotFoundException e) { return NotFound(e.Message); } } [HttpGet("{ns}/{bucket}/{key}/blobs/{id}")] [ProducesResponseType(type: typeof(byte[]), 200)] [ProducesResponseType(type: typeof(ValidationProblemDetails), 400)] [Produces(CustomMediaTypeNames.UnrealCompressedBuffer, MediaTypeNames.Application.Octet)] public async Task GetBlobAsync( [FromRoute][Required] NamespaceId ns, #pragma warning disable IDE0060 [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key, #pragma warning restore IDE0060 [Required] ContentId id, [FromQuery] bool supportsRedirect = false) { ActionResult? 
result = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.ReadObject }); if (result != null) { return result; } try { (BlobContents blobContents, string mediaType, BlobId? contentHash) = await _blobStore.GetCompressedObjectAsync(ns, id, HttpContext.RequestServices, supportsRedirectUri: supportsRedirect); StringValues acceptHeader = Request.Headers["Accept"]; if (!acceptHeader.Contains("*/*") && acceptHeader.Count != 0 && !acceptHeader.Contains(mediaType)) { return new UnsupportedMediaTypeResult(); } if (contentHash != null && Request.Headers.Range.Count == 0) {
// send the hash of the object if we are fetching the full blob
Response.Headers[CommonHeaders.HashHeaderName] = contentHash.ToString(); } if (blobContents.RedirectUri != null) { return Redirect(blobContents.RedirectUri.ToString()); } if (_nginxRedirectHelper.CanRedirect(Request, blobContents)) { return _nginxRedirectHelper.CreateActionResult(blobContents, mediaType); } return File(blobContents.Stream, mediaType, enableRangeProcessing: true); } catch (BlobNotFoundException e) { return NotFound(new ValidationProblemDetails { Title = $"Object {e.Blob} not found" }); } catch (ContentIdResolveException e) { return NotFound(new ValidationProblemDetails { Title = $"Content Id {e.ContentId} not found" }); } } [HttpPut("{ns}/{bucket}/{key}/blobs/{id}")] [DisableRequestSizeLimit] [RequiredContentType(CustomMediaTypeNames.UnrealCompressedBuffer, MediaTypeNames.Application.Octet)] public async Task PutBlobAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket,
#pragma warning disable IDE0060
[FromRoute][Required] RefId key,
#pragma warning restore IDE0060
[Required] BlobId id) { ActionResult? result = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.WriteObject }); if (result != null) { return result; } _diagnosticContext.Set("Content-Length", Request.ContentLength ?? -1); try { bool? bypassCache = _namespacePolicyResolver.GetPoliciesForNs(ns).BypassCacheOnWrite; if (Request.ContentType == CustomMediaTypeNames.UnrealCompressedBuffer) { ContentId cid = ContentId.FromBlobIdentifier(id); using IBufferedPayload payload = await _bufferedPayloadFactory.CreateFromRequestAsync(Request, "put-blob-ref-cb", HttpContext.RequestAborted); ContentId identifier = await _blobStore.PutCompressedObjectAsync(ns, payload, cid, HttpContext.RequestServices, bucketHint: bucket, bypassCache: bypassCache, cancellationToken: HttpContext.RequestAborted); return Ok(new BlobUploadResponse(identifier.AsBlobIdentifier())); } else if (Request.ContentType == MediaTypeNames.Application.Octet) { Uri?
uri = await _blobStore.MaybePutObjectWithRedirectAsync(ns, id, bucketHint: bucket, cancellationToken: HttpContext.RequestAborted); if (uri != null) { return Ok(new BlobUploadUriResponse(id, uri)); } using IBufferedPayload payload = await _bufferedPayloadFactory.CreateFromRequestAsync(Request, "put-blob-ref-raw", HttpContext.RequestAborted); BlobId identifier = await _blobStore.PutObjectAsync(ns, payload, id, bucketHint: bucket, bypassCache: bypassCache, cancellationToken: HttpContext.RequestAborted); return Ok(new BlobUploadResponse(identifier)); } else { throw new NotImplementedException("Unsupported mediatype: " + Request.ContentType); } } catch (HashMismatchException e) { return BadRequest(new ProblemDetails { Title = $"Incorrect hash, got hash \"{e.SuppliedHash}\" but hash of content was determined to be \"{e.ContentHash}\"" }); } catch (ResourceHasToManyRequestsException) { return StatusCode(StatusCodes.Status429TooManyRequests); } catch (ClientSendSlowException e) { return Problem(e.Message, null, (int)HttpStatusCode.RequestTimeout); } } [HttpPost("{ns}")] [Consumes(CustomMediaTypeNames.UnrealCompactBinary)] [Produces(CustomMediaTypeNames.UnrealCompactBinary)] [ApiExplorerSettings(IgnoreApi = true)] public async Task BatchAsync( [FromRoute][Required] NamespaceId ns, [FromBody][Required] BatchOps ops) { JupiterAclAction ActionForOp(BatchOps.BatchOp.Operation op) { switch (op) { case BatchOps.BatchOp.Operation.GET: return JupiterAclAction.ReadObject; case BatchOps.BatchOp.Operation.PUT: return JupiterAclAction.WriteObject; case BatchOps.BatchOp.Operation.HEAD: return JupiterAclAction.ReadObject; default: throw new ArgumentOutOfRangeException(nameof(op), op, null); } } JupiterAclAction[] requiredActions = ops.Ops.Select(op => ActionForOp(op.Op)).ToArray(); ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, requiredActions); if (accessResult != null) { return accessResult; } Tracer.CurrentSpan.SetAttribute("CountOfOps", ops.Ops.Length); HashSet usedOpIds = new HashSet(); foreach (BatchOps.BatchOp batchOp in ops.Ops) { bool added = usedOpIds.Add(batchOp.OpId); if (!added) { return BadRequest(new ProblemDetails { Title = $"Duplicate op ids used for id: {batchOp.OpId}" }); } } ConcurrentDictionary results = new(); async Task<(CbObject, HttpStatusCode)> BatchGetOp(BatchOps.BatchOp op) { try { (RefRecord objectRecord, BlobContents? blob) = await _refService.GetAsync(ns, op.Bucket, op.Key, Array.Empty()); if (!objectRecord.IsFinalized) { return ToErrorResult("object not finalized", HttpStatusCode.BadRequest); } if (blob == null) { throw new Exception(); } CbObject cb = new CbObject(await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted)); if (op.ResolveAttachments ?? false) { IAsyncEnumerable references = _referenceResolver.GetReferencedBlobsAsync(ns, cb); List? _ = await references.ToListAsync(); } return (cb, HttpStatusCode.OK); } catch (Exception ex) when (ex is RefNotFoundException or PartialReferenceResolveException or ReferenceIsMissingBlobsException) { return ToErrorResult(ex, HttpStatusCode.NotFound); } catch (Exception e) { Tracer.CurrentSpan.SetStatus(Status.Error); Tracer.CurrentSpan.RecordException(e); return ToErrorResult(e); } } async Task<(CbObject, HttpStatusCode)> BatchHeadOp(BatchOps.BatchOp op) { try { (RefRecord record, BlobContents? 
blob) = await _refService.GetAsync(ns, op.Bucket, op.Key, new string[] { "blobIdentifier" }); if (!record.IsFinalized) { return (CbObject.Build(writer => writer.WriteBool("exists", false)), HttpStatusCode.NotFound); } blob ??= await _blobStore.GetObjectAsync(ns, record.BlobIdentifier, bucketHint: op.Bucket); if (op.ResolveAttachments ?? false) { byte[] blobContents = await blob.Stream.ToByteArrayAsync(HttpContext.RequestAborted); CbObject cb = new CbObject(blobContents); // the reference resolver will throw if any blob is missing, so no need to do anything other then process each reference IAsyncEnumerable references = _referenceResolver.GetReferencedBlobsAsync(ns, cb); List? _ = await references.ToListAsync(); } if (blob == null) { throw new Exception(); } return (CbObject.Build(writer => writer.WriteBool("exists", true)), HttpStatusCode.OK); } catch (Exception ex) when (ex is RefNotFoundException or PartialReferenceResolveException or ReferenceIsMissingBlobsException) { return (CbObject.Build(writer => writer.WriteBool("exists", false)), HttpStatusCode.NotFound); } catch (Exception e) { return ToErrorResult(e); } } async Task<(CbObject, HttpStatusCode)> BatchPutOp(BatchOps.BatchOp op) { try { if (op.Payload == null || op.Payload.Equals(CbObject.Empty)) { throw new Exception($"Missing payload for operation: {op.OpId}"); } if (op.PayloadHash == null) { throw new Exception($"Missing payload hash for operation: {op.OpId}"); } BlobId headerHash = BlobId.FromContentHash(op.PayloadHash); BlobId objectHash = BlobId.FromBlob(op.Payload.GetView().ToArray()); if (!headerHash.Equals(objectHash)) { throw new HashMismatchException(headerHash, objectHash); } (ContentId[] missingReferences, BlobId[] missingBlobs) = await _refService.PutAsync(ns, op.Bucket, op.Key, objectHash, op.Payload, cancellationToken: HttpContext.RequestAborted); List missingHashes = new List(missingReferences); return (CbSerializer.Serialize(new PutObjectResponse(missingHashes.ToArray())), HttpStatusCode.OK); } catch (Exception e) { return ToErrorResult(e); } } await Parallel.ForEachAsync(ops.Ops, CancellationToken.None, async (op, token) => { switch (op.Op) { case BatchOps.BatchOp.Operation.GET: results.TryAdd(op.OpId, await BatchGetOp(op)); break; case BatchOps.BatchOp.Operation.PUT: results.TryAdd(op.OpId, await BatchPutOp(op)); break; case BatchOps.BatchOp.Operation.HEAD: results.TryAdd(op.OpId, await BatchHeadOp(op)); break; case BatchOps.BatchOp.Operation.INVALID: default: throw new NotImplementedException($"Unknown op type {op.Op}"); } await Task.CompletedTask; }); return Ok(new BatchOpsResponse() { Results = results.Select(result => { return new BatchOpsResponse.OpResponses() { OpId = result.Key, Response = result.Value.Item1, StatusCode = (int)result.Value.Item2 }; }).ToList() }); } private static (CbObject, HttpStatusCode) ToErrorResult(Exception exception, HttpStatusCode statusCode = HttpStatusCode.InternalServerError) { Exception e = exception; CbWriter writer = new CbWriter(); writer.BeginObject(); writer.WriteString("title", e.Message); writer.WriteInteger("status", (int)statusCode); if (e.StackTrace != null) { writer.WriteString("stackTrace", e.StackTrace); } writer.EndObject(); return (writer.ToObject(), statusCode); } private static (CbObject, HttpStatusCode) ToErrorResult(string message, HttpStatusCode statusCode = HttpStatusCode.InternalServerError) { CbWriter writer = new CbWriter(); writer.BeginObject(); writer.WriteString("title", message); writer.WriteInteger("status", (int)statusCode); writer.EndObject(); 
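// the error object produced here (and by the exception overload above) mirrors ProblemDetails and,
// rendered as JSON, looks roughly like (illustration only):
//   { "title": "<message>", "status": <status code, 500 by default>, "stackTrace": "<only set by the exception overload>" }
// it is returned either as a per-op response from BatchAsync or as an IsError attachment inside a cb package.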
return (writer.ToObject(), statusCode); }
///
/// Drop all ref records in the namespace
///
/// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance)
[HttpDelete("{ns}", Order = 500)] [ProducesResponseType(204)] public async Task DeleteNamespaceAsync( [FromRoute][Required] NamespaceId ns ) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.DeleteNamespace }); if (accessResult != null) { return accessResult; } try { await _refService.DropNamespaceAsync(ns, HttpContext.RequestAborted); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } return NoContent(); }
///
/// Drop all ref records in the bucket
///
/// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance)
/// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily.
[HttpDelete("{ns}/{bucket}", Order = 500)] [ProducesResponseType(200)] public async Task DeleteBucketAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.DeleteBucket }); if (accessResult != null) { return accessResult; } long countOfDeletedRecords; try { countOfDeletedRecords = await _refService.DeleteBucketAsync(ns, bucket, HttpContext.RequestAborted); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } return Ok(new BucketDeletedResponse(countOfDeletedRecords)); }
///
/// Delete an individual ref key
///
/// Namespace. Each namespace is completely separated from each other. Use for different types of data that is never expected to be similar (between two different games for instance)
/// The category/type of record you are caching. Is a clustered key together with the actual key, but all records in the same bucket can be dropped easily.
/// The unique name of this particular key
[HttpDelete("{ns}/{bucket}/{key}", Order = 500)] [ProducesResponseType(200)] [ProducesResponseType(404)] public async Task DeleteAsync( [FromRoute][Required] NamespaceId ns, [FromRoute][Required] BucketId bucket, [FromRoute][Required] RefId key) { ActionResult? accessResult = await _requestHelper.HasAccessToNamespaceAsync(User, Request, ns, new[] { JupiterAclAction.DeleteObject }); if (accessResult != null) { return accessResult; } try { bool deleted = await _refService.DeleteAsync(ns, bucket, key, HttpContext.RequestAborted); return Ok(new RefDeletedResponse(deleted ?
1 : 0)); } catch (NamespaceNotFoundException e) { return NotFound(new ProblemDetails { Title = $"Namespace {e.Namespace} did not exist" }); } catch (RefNotFoundException) { return Ok(new RefDeletedResponse(0)); } } } public class RefReplicationStateResponse { public RefReplicationStateResponse(NamespaceId ns, BucketId bucket, RefId key, Dictionary> blobsPerRegion) { Ns = ns; Bucket = bucket; Key = key; BlobsPerRegion = blobsPerRegion; } public NamespaceId Ns { get;set; } public BucketId Bucket { get;set; } public RefId Key { get;set; } public Dictionary> BlobsPerRegion { get; init; } } public class RefDeletedResponse { public RefDeletedResponse() { } public RefDeletedResponse(int deletedCount) { DeletedCount = deletedCount; } [CbField("deletedCount")] public int DeletedCount { get; set; } } public class BucketDeletedResponse { public BucketDeletedResponse() { } public BucketDeletedResponse(long countOfDeletedRecords) { CountOfDeletedRecords = countOfDeletedRecords; } [CbField("countOfDeletedRecords")] public long CountOfDeletedRecords { get; set; } } public class BatchOps { public BatchOps() { Ops = Array.Empty(); } [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1034:Nested types should not be visible", Justification = "For serialization only")] public class BatchOp { public BatchOp() { Payload = null; PayloadHash = null; } public enum Operation { INVALID, GET, PUT, HEAD, } [Required] [CbField("opId")] public uint OpId { get; set; } [CbField("op")] [JsonIgnore] [UsedImplicitly] public string OpString { get => Op.ToString(); set => Op = Enum.Parse(value); } [Required] public Operation Op { get; set; } = Operation.INVALID; [Required] [CbField("bucket")] public BucketId Bucket { get; set; } [Required] [CbField("key")] public RefId Key { get; set; } [CbField("resolveAttachments")] public bool? ResolveAttachments { get; set; } = null; [CbField("payload")] public CbObject? Payload { get; set; } = null; [CbField("payloadHash")] public ContentHash? PayloadHash { get; set; } = null; } [CbField("ops")] public BatchOp[] Ops { get; set; } } public class BatchOpsResponse { public BatchOpsResponse() { } [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1034:Nested types should not be visible", Justification = "For serialization only")] public class OpResponses { public OpResponses() { } [CbField("opId")] public uint OpId { get; set; } [CbField("response")] public CbObject Response { get; set; } = null!; [CbField("statusCode")] public int StatusCode { get; set; } } [CbField("results")] [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA2227:Collection properties should be read only", Justification = "Used by serialization")] public List Results { get; set; } = new List(); } public class RefMetadataResponse { public RefMetadataResponse() { PayloadIdentifier = null!; InlinePayload = null!; } [JsonConstructor] public RefMetadataResponse(NamespaceId ns, BucketId bucket, RefId name, BlobId payloadIdentifier, DateTime lastAccess, bool isFinalized, byte[]? 
inlinePayload) { Ns = ns; Bucket = bucket; Name = name; PayloadIdentifier = payloadIdentifier; LastAccess = lastAccess; IsFinalized = isFinalized; InlinePayload = inlinePayload; } public RefMetadataResponse(RefRecord objectRecord) { Ns = objectRecord.Namespace; Bucket = objectRecord.Bucket; Name = objectRecord.Name; PayloadIdentifier = objectRecord.BlobIdentifier; LastAccess = objectRecord.LastAccess; IsFinalized = objectRecord.IsFinalized; InlinePayload = objectRecord.InlinePayload; } [CbField("ns")] public NamespaceId Ns { get; set; } [CbField("bucket")] public BucketId Bucket { get; set; } [CbField("name")] public RefId Name { get; set; } [CbField("payloadIdentifier")] public BlobId PayloadIdentifier { get; set; } [CbField("lastAccess")] public DateTime LastAccess { get; set; } [CbField("isFinalized")] public bool IsFinalized { get; set; } [CbField("inlinePayload")] public byte[]? InlinePayload { get; set; } } public class EnumerateBucketResponse { public EnumerateBucketResponse() { RefIds = null!; } [JsonConstructor] public EnumerateBucketResponse(RefId[] refIds) { RefIds = refIds; } [CbField("refs")] public RefId[] RefIds { get; set; } } public class PutObjectResponse { public PutObjectResponse() { Needs = null!; } public PutObjectResponse(ContentHash[] missingReferences) { Needs = missingReferences; } [CbField("needs")] public ContentHash[] Needs { get; set; } } public class ExistCheckMultipleRefsResponse { public ExistCheckMultipleRefsResponse(List<(BucketId, RefId)> missing) { Missing = missing.Select(pair => { (BucketId bucketId, RefId ioHashKey) = pair; return new MissingReference() { Bucket = bucketId, Key = ioHashKey, }; }).ToList(); } [JsonConstructor] public ExistCheckMultipleRefsResponse(List missing) { Missing = missing; } [CbField("missing")] [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA2227:Collection properties should be read only", Justification = "Used by serialization")] public List Missing { get; set; } [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1034:Nested types should not be visible", Justification = "For serialization only")] public class MissingReference { [CbField("bucket")] public BucketId Bucket { get; set; } [CbField("key")] public RefId Key { get; set; } } } public class GetNamespacesResponse { public GetNamespacesResponse() { Namespaces = Array.Empty(); } [JsonConstructor] public GetNamespacesResponse(NamespaceId[] namespaces) { Namespaces = namespaces; } [CbField("namespaces")] public NamespaceId[] Namespaces { get; set; } } public class GetBucketsResponse { public GetBucketsResponse() { Buckets = Array.Empty(); } [JsonConstructor] public GetBucketsResponse(BucketId[] buckets) { Buckets = buckets; } public BucketId[] Buckets { get; set; } } }
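// a minimal client-side sketch of calling the batch endpoint above (POST api/v1/refs/{ns}).
// this is illustrative only: it assumes CbSerializer.Serialize/Deserialize round-trip the [CbField]-annotated
// types in this file, and "httpClient", "ns" and the example bucket/key values are hypothetical.
//
//   BatchOps ops = new BatchOps
//   {
//       Ops = new[]
//       {
//           new BatchOps.BatchOp { OpId = 1, Op = BatchOps.BatchOp.Operation.HEAD, Bucket = new BucketId("terrainTexture"), Key = new RefId("iAmAVeryValidKey") },
//           new BatchOps.BatchOp { OpId = 2, Op = BatchOps.BatchOp.Operation.GET, Bucket = new BucketId("terrainTexture"), Key = new RefId("anotherKey"), ResolveAttachments = true },
//       }
//   };
//   byte[] body = CbSerializer.Serialize(ops).GetView().ToArray();
//   using HttpRequestMessage request = new(HttpMethod.Post, $"api/v1/refs/{ns}");
//   request.Content = new ByteArrayContent(body);
//   request.Content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue(CustomMediaTypeNames.UnrealCompactBinary);
//   using HttpResponseMessage response = await httpClient.SendAsync(request);
//   BatchOpsResponse results = CbSerializer.Deserialize<BatchOpsResponse>(new CbObject(await response.Content.ReadAsByteArrayAsync()));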