]> git.immae.eu Git - github/Chocobozzz/PeerTube.git/commitdiff
fix: validate s3 response (#5231)
author: q_h <q_h@inbox.ru>
Thu, 8 Sep 2022 06:54:12 +0000 (09:54 +0300)
committer: GitHub <noreply@github.com>
Thu, 8 Sep 2022 06:54:12 +0000 (08:54 +0200)
* refactor: remove `objectStoragePut`

this is already implemented in `lib-storage`

* fix: validate s3 response

* fix: enable built-in retries

* chore: add `leavePartsOnError` comment

* refactor: decrease partSize to speed up retries

* refactor: rethrow s3 errors

* refactor: reduce `max_upload_part` default to 100MB

* refactor: validate response

* chore: add link to explanation

config/default.yaml
config/production.yaml.example
server/lib/object-storage/shared/object-storage-helpers.ts

index 9bf1ca2847199a7d51b1e31aa612b3c8000a86b4..0b0a54eef3f78af9feea2db6f51409c7973f1fd2 100644 (file)
@@ -152,7 +152,7 @@ object_storage:
     secret_access_key: ''
 
   # Maximum amount to upload in one request to object storage
-  max_upload_part: 2GB
+  max_upload_part: 100MB
 
   streaming_playlists:
     bucket_name: 'streaming-playlists'
index f6dc6ccdb82d673ca5ef8587d7446ba960fe6d47..209aaa56ac1a6c3b8fe5a14e31bb490643f2ed39 100644 (file)
@@ -150,7 +150,7 @@ object_storage:
     secret_access_key: ''
 
   # Maximum amount to upload in one request to object storage
-  max_upload_part: 2GB
+  max_upload_part: 100MB
 
   streaming_playlists:
     bucket_name: 'streaming-playlists'
index a2de9253290b57208df8742fdf9677e43150c4ad..16161362cbfffd1e9784b30016baf777eda4bff2 100644 (file)
@@ -1,11 +1,11 @@
-import { createReadStream, createWriteStream, ensureDir, ReadStream, stat } from 'fs-extra'
+import { createReadStream, createWriteStream, ensureDir, ReadStream } from 'fs-extra'
 import { dirname } from 'path'
 import { Readable } from 'stream'
 import {
+  CompleteMultipartUploadCommandOutput,
   DeleteObjectCommand,
   GetObjectCommand,
   ListObjectsV2Command,
-  PutObjectCommand,
   PutObjectCommandInput
 } from '@aws-sdk/client-s3'
 import { Upload } from '@aws-sdk/lib-storage'
@@ -31,14 +31,9 @@ async function storeObject (options: {
 
   logger.debug('Uploading file %s to %s%s in bucket %s', inputPath, bucketInfo.PREFIX, objectStorageKey, bucketInfo.BUCKET_NAME, lTags())
 
-  const stats = await stat(inputPath)
   const fileStream = createReadStream(inputPath)
 
-  if (stats.size > CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART) {
-    return multiPartUpload({ content: fileStream, objectStorageKey, bucketInfo })
-  }
-
-  return objectStoragePut({ objectStorageKey, content: fileStream, bucketInfo })
+  return uploadToStorage({ objectStorageKey, content: fileStream, bucketInfo })
 }
 
 async function removeObject (filename: string, bucketInfo: BucketInfo) {
@@ -132,31 +127,7 @@ export {
 
 // ---------------------------------------------------------------------------
 
-async function objectStoragePut (options: {
-  objectStorageKey: string
-  content: ReadStream
-  bucketInfo: BucketInfo
-}) {
-  const { objectStorageKey, content, bucketInfo } = options
-
-  const input: PutObjectCommandInput = {
-    Bucket: bucketInfo.BUCKET_NAME,
-    Key: buildKey(objectStorageKey, bucketInfo),
-    Body: content
-  }
-
-  if (CONFIG.OBJECT_STORAGE.UPLOAD_ACL) {
-    input.ACL = CONFIG.OBJECT_STORAGE.UPLOAD_ACL
-  }
-
-  const command = new PutObjectCommand(input)
-
-  await getClient().send(command)
-
-  return getPrivateUrl(bucketInfo, objectStorageKey)
-}
-
-async function multiPartUpload (options: {
+async function uploadToStorage (options: {
   content: ReadStream
   objectStorageKey: string
   bucketInfo: BucketInfo
@@ -177,11 +148,23 @@ async function multiPartUpload (options: {
     client: getClient(),
     queueSize: 4,
     partSize: CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART,
-    leavePartsOnError: false,
+
+    // `leavePartsOnError` must be set to `true` to avoid silently dropping failed parts
+    // More detailed explanation:
+    // https://github.com/aws/aws-sdk-js-v3/blob/v3.164.0/lib/lib-storage/src/Upload.ts#L274
+    // https://github.com/aws/aws-sdk-js-v3/issues/2311#issuecomment-939413928
+    leavePartsOnError: true,
     params: input
   })
 
-  await parallelUploads3.done()
+  const response = (await parallelUploads3.done()) as CompleteMultipartUploadCommandOutput
+  // Check is needed even if the HTTP status code is 200 OK
+  // For more information, see https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+  if (!response.Bucket) {
+    const message = `Error uploading ${objectStorageKey} to bucket ${bucketInfo.BUCKET_NAME}`
+    logger.error(message, { response, ...lTags() })
+    throw new Error(message)
+  }
 
   logger.debug(
     'Completed %s%s in bucket %s',