author     q_h <q_h@inbox.ru>                    2022-09-08 09:54:12 +0300
committer  GitHub <noreply@github.com>           2022-09-08 08:54:12 +0200
commit     23c0b67d7b91fb44ca24b68d959b0ce62ae237c5 (patch)
tree       80482d64d6235aab3547f0741b70133c884faaaf /server
parent     e9fc9e03c120fb048ed00b38157d15144770ec23 (diff)
download   PeerTube-23c0b67d7b91fb44ca24b68d959b0ce62ae237c5.tar.gz
           PeerTube-23c0b67d7b91fb44ca24b68d959b0ce62ae237c5.tar.zst
           PeerTube-23c0b67d7b91fb44ca24b68d959b0ce62ae237c5.zip
fix: validate s3 response (#5231)
* refactor: remove `objectStoragePut`: this is already implemented in `lib-storage`
* fix: validate s3 response
* fix: enable built-in retries
* chore: add `leavePartsOnError` comment
* refactor: decrease partSize to speed up retries
* refactor: rethrow s3 errors
* refactor: reduce max_upload_part default to 100MB
* refactor: validate response
* chore: add link to explanation
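To make the shape of the fix easier to follow outside the diff, here is a minimal TypeScript sketch of the upload path this commit converges on, built only from the public `@aws-sdk/client-s3` and `@aws-sdk/lib-storage` APIs. The client settings, bucket name, key and file path are illustrative placeholders, not values from PeerTube's configuration.

import { S3Client, CompleteMultipartUploadCommandOutput } from '@aws-sdk/client-s3'
import { Upload } from '@aws-sdk/lib-storage'
import { createReadStream } from 'fs'

// Illustrative placeholders only
const client = new S3Client({ region: 'us-east-1' })
const bucketName = 'example-bucket'
const key = 'videos/example.mp4'

async function uploadExample () {
  const upload = new Upload({
    client,
    queueSize: 4,
    partSize: 100 * 1024 * 1024, // 100MB, mirroring the reduced max_upload_part default
    // Keep failed parts instead of silently aborting, so errors reach the caller
    leavePartsOnError: true,
    params: { Bucket: bucketName, Key: key, Body: createReadStream('/tmp/example.mp4') }
  })

  // S3 can answer CompleteMultipartUpload with 200 OK and still report an error,
  // so the completion output is validated instead of being trusted blindly
  const response = (await upload.done()) as CompleteMultipartUploadCommandOutput
  if (!response.Bucket) {
    throw new Error(`Error uploading ${key} to bucket ${bucketName}`)
  }

  return response
}

The cast to `CompleteMultipartUploadCommandOutput` and the `response.Bucket` check mirror what the helper in the diff below does.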
Diffstat (limited to 'server')
-rw-r--r--   server/lib/object-storage/shared/object-storage-helpers.ts   53
1 file changed, 18 insertions(+), 35 deletions(-)
diff --git a/server/lib/object-storage/shared/object-storage-helpers.ts b/server/lib/object-storage/shared/object-storage-helpers.ts
index a2de92532..16161362c 100644
--- a/server/lib/object-storage/shared/object-storage-helpers.ts
+++ b/server/lib/object-storage/shared/object-storage-helpers.ts
@@ -1,11 +1,11 @@
-import { createReadStream, createWriteStream, ensureDir, ReadStream, stat } from 'fs-extra'
+import { createReadStream, createWriteStream, ensureDir, ReadStream } from 'fs-extra'
 import { dirname } from 'path'
 import { Readable } from 'stream'
 import {
+  CompleteMultipartUploadCommandOutput,
   DeleteObjectCommand,
   GetObjectCommand,
   ListObjectsV2Command,
-  PutObjectCommand,
   PutObjectCommandInput
 } from '@aws-sdk/client-s3'
 import { Upload } from '@aws-sdk/lib-storage'
@@ -31,14 +31,9 @@ async function storeObject (options: {
 
   logger.debug('Uploading file %s to %s%s in bucket %s', inputPath, bucketInfo.PREFIX, objectStorageKey, bucketInfo.BUCKET_NAME, lTags())
 
-  const stats = await stat(inputPath)
   const fileStream = createReadStream(inputPath)
 
-  if (stats.size > CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART) {
-    return multiPartUpload({ content: fileStream, objectStorageKey, bucketInfo })
-  }
-
-  return objectStoragePut({ objectStorageKey, content: fileStream, bucketInfo })
+  return uploadToStorage({ objectStorageKey, content: fileStream, bucketInfo })
 }
 
 async function removeObject (filename: string, bucketInfo: BucketInfo) {
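The removed size check is redundant because the `Upload` class in `@aws-sdk/lib-storage` already splits the body into `partSize` chunks and sends a single `PutObject` when everything fits into one chunk, so the caller no longer needs to `stat()` the file and branch on its size. A minimal sketch under that assumption, where `example-bucket` and the `makeUpload` helper are hypothetical names used only for illustration:

import { S3Client } from '@aws-sdk/client-s3'
import { Upload } from '@aws-sdk/lib-storage'
import { Readable } from 'stream'

const client = new S3Client({ region: 'us-east-1' }) // illustrative region

// Hypothetical helper: the same call path serves small and large bodies,
// letting lib-storage decide between a single PUT and a multipart upload
function makeUpload (key: string, body: Readable) {
  return new Upload({
    client,
    queueSize: 4,
    partSize: 100 * 1024 * 1024, // 100MB, matching the reduced default
    leavePartsOnError: true,
    params: { Bucket: 'example-bucket', Key: key, Body: body }
  })
}

async function demo () {
  // A few bytes go out as one PutObject; a multi-GB stream becomes a multipart upload
  await makeUpload('tiny.txt', Readable.from('hello')).done()
}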
@@ -132,31 +127,7 @@ export {
 
 // ---------------------------------------------------------------------------
 
-async function objectStoragePut (options: {
-  objectStorageKey: string
-  content: ReadStream
-  bucketInfo: BucketInfo
-}) {
-  const { objectStorageKey, content, bucketInfo } = options
-
-  const input: PutObjectCommandInput = {
-    Bucket: bucketInfo.BUCKET_NAME,
-    Key: buildKey(objectStorageKey, bucketInfo),
-    Body: content
-  }
-
-  if (CONFIG.OBJECT_STORAGE.UPLOAD_ACL) {
-    input.ACL = CONFIG.OBJECT_STORAGE.UPLOAD_ACL
-  }
-
-  const command = new PutObjectCommand(input)
-
-  await getClient().send(command)
-
-  return getPrivateUrl(bucketInfo, objectStorageKey)
-}
-
-async function multiPartUpload (options: {
+async function uploadToStorage (options: {
   content: ReadStream
   objectStorageKey: string
   bucketInfo: BucketInfo
@@ -177,11 +148,23 @@ async function multiPartUpload (options: {
     client: getClient(),
     queueSize: 4,
     partSize: CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART,
-    leavePartsOnError: false,
+
+    // `leavePartsOnError` must be set to `true` to avoid silently dropping failed parts
+    // More detailed explanation:
+    // https://github.com/aws/aws-sdk-js-v3/blob/v3.164.0/lib/lib-storage/src/Upload.ts#L274
+    // https://github.com/aws/aws-sdk-js-v3/issues/2311#issuecomment-939413928
+    leavePartsOnError: true,
     params: input
   })
 
-  await parallelUploads3.done()
+  const response = (await parallelUploads3.done()) as CompleteMultipartUploadCommandOutput
+  // Check is needed even if the HTTP status code is 200 OK
+  // For more information, see https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+  if (!response.Bucket) {
+    const message = `Error uploading ${objectStorageKey} to bucket ${bucketInfo.BUCKET_NAME}`
+    logger.error(message, { response, ...lTags() })
+    throw new Error(message)
+  }
 
   logger.debug(
     'Completed %s%s in bucket %s',