></my-peertube-checkbox>
</div>
+ <div class="form-group">
+ <my-peertube-checkbox
+ inputName="transcodingAllowAudioFiles" formControlName="allowAudioFiles"
+ i18n-labelText labelText="Allow audio files upload"
+ i18n-helpHtml helpHtml="Allow your users to upload audio files that will be merged with the preview file on upload"
+ ></my-peertube-checkbox>
+ </div>
+
<div class="form-group">
<label i18n for="transcodingThreads">Transcoding threads</label>
<div class="peertube-select-container">
enabled: null,
threads: this.customConfigValidatorsService.TRANSCODING_THREADS,
allowAdditionalExtensions: null,
+ allowAudioFiles: null,
resolutions: {}
},
autoBlacklist: {
private flushPlayer () {
// Remove player if it exists
if (this.player) {
- this.player.dispose()
- this.player = undefined
+ try {
+ this.player.dispose()
+ this.player = undefined
+ } catch (err) {
+ console.error('Cannot dispose player.', err)
+ }
}
}
}
videojs(options.common.playerElement, videojsOptions, function (this: any) {
const player = this
- player.tech_.one('error', () => self.maybeFallbackToWebTorrent(mode, player, options))
- player.one('error', () => self.maybeFallbackToWebTorrent(mode, player, options))
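+ // Both the tech and the player can emit 'error'; make sure we fall back to WebTorrent only once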
+ let alreadyFallback = false
+
+ player.tech_.one('error', () => {
+ if (!alreadyFallback) self.maybeFallbackToWebTorrent(mode, player, options)
+ alreadyFallback = true
+ })
+
+ player.one('error', () => {
+ if (!alreadyFallback) self.maybeFallbackToWebTorrent(mode, player, options)
+ alreadyFallback = true
+ })
self.addContextMenu(mode, player, options.common.embedUrl)
enabled: true
# Allow your users to upload .mkv, .mov, .avi, .flv videos
allow_additional_extensions: true
+ # If a user uploads an audio file, PeerTube will create a video by merging the preview file and the audio file
+ allow_audio_files: true
threads: 1
resolutions: # Only created if the original video has a higher resolution, uses more storage!
240p: false
enabled: true
# Allow your users to upload .mkv, .mov, .avi, .flv videos
allow_additional_extensions: true
+ # If a user uploads an audio file, PeerTube will create a video by merging the preview file and the audio file
+ allow_audio_files: true
threads: 1
resolutions: # Only created if the original video has a higher resolution, uses more storage!
240p: false
transcoding:
enabled: true
allow_additional_extensions: false
+ allow_audio_files: false
threads: 2
resolutions:
240p: true
import { VideoModel } from '../server/models/video/video'
import { initDatabaseModels } from '../server/initializers'
import { JobQueue } from '../server/lib/job-queue'
+import { VideoTranscodingPayload } from '../server/lib/job-queue/handlers/video-transcoding'
program
.option('-v, --video [videoUUID]', 'Video UUID')
const video = await VideoModel.loadByUUIDWithFile(program['video'])
if (!video) throw new Error('Video not found.')
- const dataInput = {
- videoUUID: video.uuid,
- isNewVideo: false,
- resolution: undefined
- }
-
- if (program.resolution !== undefined) {
- dataInput.resolution = program.resolution
- }
+ const dataInput: VideoTranscodingPayload = program.resolution !== undefined
+ ? { type: 'new-resolution' as 'new-resolution', videoUUID: video.uuid, isNewVideo: false, resolution: program.resolution }
+ : { type: 'optimize' as 'optimize', videoUUID: video.uuid, isNewVideo: false }
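+ // With a resolution the job transcodes that single resolution; without one it re-runs the 'optimize' step
+ // (the script is typically invoked via "npm run create-transcoding-job -- -v <video UUID>")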
await JobQueue.Instance.init()
await JobQueue.Instance.createJob({ type: 'video-transcoding', payload: dataInput })
transcoding: {
enabled: CONFIG.TRANSCODING.ENABLED,
allowAdditionalExtensions: CONFIG.TRANSCODING.ALLOW_ADDITIONAL_EXTENSIONS,
+ allowAudioFiles: CONFIG.TRANSCODING.ALLOW_AUDIO_FILES,
threads: CONFIG.TRANSCODING.THREADS,
resolutions: {
'240p': CONFIG.TRANSCODING.RESOLUTIONS[ '240p' ],
import * as express from 'express'
import { extname, join } from 'path'
-import { VideoCreate, VideoPrivacy, VideoState, VideoUpdate } from '../../../../shared'
+import { VideoCreate, VideoPrivacy, VideoResolution, VideoState, VideoUpdate } from '../../../../shared'
import { getVideoFileFPS, getVideoFileResolution } from '../../../helpers/ffmpeg-utils'
import { logger } from '../../../helpers/logger'
import { auditLoggerFactory, getAuditIdFromRes, VideoAuditView } from '../../../helpers/audit-logger'
import { getFormattedObjects, getServerActor } from '../../../helpers/utils'
import { autoBlacklistVideoIfNeeded } from '../../../lib/video-blacklist'
-import { MIMETYPES, VIDEO_CATEGORIES, VIDEO_LANGUAGES, VIDEO_LICENCES, VIDEO_PRIVACIES } from '../../../initializers/constants'
+import { MIMETYPES, VIDEO_CATEGORIES, VIDEO_LANGUAGES, VIDEO_LICENCES, VIDEO_PRIVACIES, DEFAULT_AUDIO_RESOLUTION } from '../../../initializers/constants'
import {
changeVideoChannelShare,
federateVideoIfNeeded,
import { sequelizeTypescript } from '../../../initializers/database'
import { createVideoMiniatureFromExisting, generateVideoMiniature } from '../../../lib/thumbnail'
import { ThumbnailType } from '../../../../shared/models/videos/thumbnail.type'
+import { VideoTranscodingPayload } from '../../../lib/job-queue/handlers/video-transcoding'
const auditLogger = auditLoggerFactory('videos')
const videosRouter = express.Router()
const video = new VideoModel(videoData)
video.url = getVideoActivityPubUrl(video) // We use the UUID, so set the URL after building the object
- // Build the file object
- const { videoFileResolution } = await getVideoFileResolution(videoPhysicalFile.path)
- const fps = await getVideoFileFPS(videoPhysicalFile.path)
-
const videoFileData = {
extname: extname(videoPhysicalFile.filename),
- resolution: videoFileResolution,
- size: videoPhysicalFile.size,
- fps
+ size: videoPhysicalFile.size
}
const videoFile = new VideoFileModel(videoFileData)
+ if (!videoFile.isAudio()) {
+ videoFile.fps = await getVideoFileFPS(videoPhysicalFile.path)
+ videoFile.resolution = (await getVideoFileResolution(videoPhysicalFile.path)).videoFileResolution
+ } else {
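+ // Audio uploads have no video stream to probe: skip fps detection and use the default resolution for the generated video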
+ videoFile.resolution = DEFAULT_AUDIO_RESOLUTION
+ }
+
// Move physical file
const videoDir = CONFIG.STORAGE.VIDEOS_DIR
const destination = join(videoDir, video.getVideoFilename(videoFile))
if (video.state === VideoState.TO_TRANSCODE) {
// Use the UUID because we don't have an auto-incremented id for now
- const dataInput = {
- videoUUID: videoCreated.uuid,
- isNewVideo: true
+ let dataInput: VideoTranscodingPayload
+
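+ // Audio uploads get a 'merge-audio' job that combines the audio with the video preview image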
+ if (videoFile.isAudio()) {
+ dataInput = {
+ type: 'merge-audio' as 'merge-audio',
+ resolution: DEFAULT_AUDIO_RESOLUTION,
+ videoUUID: videoCreated.uuid,
+ isNewVideo: true
+ }
+ } else {
+ dataInput = {
+ type: 'optimize' as 'optimize',
+ videoUUID: videoCreated.uuid,
+ isNewVideo: true
+ }
}
await JobQueue.Instance.createJob({ type: 'video-transcoding', payload: dataInput })
return res.sendFile(result.path, { maxAge: STATIC_MAX_AGE })
}
-async function generateNodeinfo (req: express.Request, res: express.Response, next: express.NextFunction) {
+async function generateNodeinfo (req: express.Request, res: express.Response) {
const { totalVideos } = await VideoModel.getStats()
const { totalLocalVideoComments } = await VideoCommentModel.getStats()
const { totalUsers } = await UserModel.getStats()
}
}
-type TranscodeOptions = {
+type TranscodeOptionsType = 'hls' | 'quick-transcode' | 'video' | 'merge-audio'
+
+interface BaseTranscodeOptions {
+ type: TranscodeOptionsType
inputPath: string
outputPath: string
resolution: VideoResolution
isPortraitMode?: boolean
- doQuickTranscode?: Boolean
+}
- hlsPlaylist?: {
+interface HLSTranscodeOptions extends BaseTranscodeOptions {
+ type: 'hls'
+ hlsPlaylist: {
videoFilename: string
}
}
+interface QuickTranscodeOptions extends BaseTranscodeOptions {
+ type: 'quick-transcode'
+}
+
+interface VideoTranscodeOptions extends BaseTranscodeOptions {
+ type: 'video'
+}
+
+interface MergeAudioTranscodeOptions extends BaseTranscodeOptions {
+ type: 'merge-audio'
+ audioPath: string
+}
+
+type TranscodeOptions = HLSTranscodeOptions | VideoTranscodeOptions | MergeAudioTranscodeOptions | QuickTranscodeOptions
+
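+// Illustrative example of the new union in use (mirrors the options built by mergeAudioVideofile later in this diff):
+// transcode({ type: 'merge-audio', inputPath: previewPath, audioPath, outputPath, resolution })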
function transcode (options: TranscodeOptions) {
return new Promise<void>(async (res, rej) => {
try {
let command = ffmpeg(options.inputPath, { niceness: FFMPEG_NICE.TRANSCODING })
.output(options.outputPath)
- if (options.doQuickTranscode) {
- if (options.hlsPlaylist) {
- throw(Error("Quick transcode and HLS can't be used at the same time"))
- }
-
- command
- .format('mp4')
- .addOption('-c:v copy')
- .addOption('-c:a copy')
- .outputOption('-map_metadata -1') // strip all metadata
- .outputOption('-movflags faststart')
- } else if (options.hlsPlaylist) {
+ if (options.type === 'quick-transcode') {
+ command = await buildQuickTranscodeCommand(command)
+ } else if (options.type === 'hls') {
command = await buildHLSCommand(command, options)
+ } else if (options.type === 'merge-audio') {
+ command = await buildAudioMergeCommand(command, options)
} else {
command = await buildx264Command(command, options)
}
return rej(err)
})
.on('end', () => {
- return onTranscodingSuccess(options)
+ return fixHLSPlaylistIfNeeded(options)
.then(() => res())
.catch(err => rej(err))
})
getVideoFileResolution,
getDurationFromVideoFile,
generateImageFromVideoFile,
+ TranscodeOptions,
+ TranscodeOptionsType,
transcode,
getVideoFileFPS,
computeResolutionsToTranscode,
// ---------------------------------------------------------------------------
-async function buildx264Command (command: ffmpeg.FfmpegCommand, options: TranscodeOptions) {
+async function buildx264Command (command: ffmpeg.FfmpegCommand, options: VideoTranscodeOptions) {
let fps = await getVideoFileFPS(options.inputPath)
// On small/medium resolutions, limit FPS
if (
fps = VIDEO_TRANSCODING_FPS.AVERAGE
}
- command = await presetH264(command, options.resolution, fps)
+ command = await presetH264(command, options.inputPath, options.resolution, fps)
if (options.resolution !== undefined) {
// '?x720' or '720x?' for example
return command
}
-async function buildHLSCommand (command: ffmpeg.FfmpegCommand, options: TranscodeOptions) {
+async function buildAudioMergeCommand (command: ffmpeg.FfmpegCommand, options: MergeAudioTranscodeOptions) {
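+ // Loop the single-image input (the video preview); '-shortest' below ends the output when the audio stream ends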
+ command = command.loop(undefined)
+
+ command = await presetH264VeryFast(command, options.audioPath, options.resolution)
+
+ command = command.input(options.audioPath)
+ .videoFilter('scale=trunc(iw/2)*2:trunc(ih/2)*2') // Avoid "height not divisible by 2" error
+ .outputOption('-tune stillimage')
+ .outputOption('-shortest')
+
+ return command
+}
+
+async function buildQuickTranscodeCommand (command: ffmpeg.FfmpegCommand) {
+ command = await presetCopy(command)
+
+ command = command.outputOption('-map_metadata -1') // strip all metadata
+ .outputOption('-movflags faststart')
+
+ return command
+}
+
+async function buildHLSCommand (command: ffmpeg.FfmpegCommand, options: HLSTranscodeOptions) {
const videoPath = getHLSVideoPath(options)
command = await presetCopy(command)
return command
}
-function getHLSVideoPath (options: TranscodeOptions) {
+function getHLSVideoPath (options: HLSTranscodeOptions) {
return `${dirname(options.outputPath)}/${options.hlsPlaylist.videoFilename}`
}
-async function onTranscodingSuccess (options: TranscodeOptions) {
- if (!options.hlsPlaylist) return
+async function fixHLSPlaylistIfNeeded (options: TranscodeOptions) {
+ if (options.type !== 'hls') return
- // Fix wrong mapping with some ffmpeg versions
const fileContent = await readFile(options.outputPath)
const videoFileName = options.hlsPlaylist.videoFilename
const videoFilePath = getHLSVideoPath(options)
+ // Fix wrong mapping with some ffmpeg versions
const newContent = fileContent.toString()
.replace(`#EXT-X-MAP:URI="${videoFilePath}",`, `#EXT-X-MAP:URI="${videoFileName}",`)
* and quality. Superfast and ultrafast will give you better
* performance, but then quality is noticeably worse.
*/
-async function presetH264VeryFast (command: ffmpeg.FfmpegCommand, resolution: VideoResolution, fps: number): Promise<ffmpeg.FfmpegCommand> {
- let localCommand = await presetH264(command, resolution, fps)
+async function presetH264VeryFast (command: ffmpeg.FfmpegCommand, input: string, resolution: VideoResolution, fps?: number) {
+ let localCommand = await presetH264(command, input, resolution, fps)
+
localCommand = localCommand.outputOption('-preset:v veryfast')
- .outputOption([ '--aq-mode=2', '--aq-strength=1.3' ])
+
/*
MAIN reference: https://slhck.info/video/2017/03/01/rate-control.html
Our target situation is closer to a livestream than a stream,
since we want to reduce the encoding burden as much as possible,
- altough not to the point of a livestream where there is a hard
+ although not to the point of a livestream where there is a hard
constraint on the frames per second to be encoded.
-
- why '--aq-mode=2 --aq-strength=1.3' instead of '-profile:v main'?
- Make up for most of the loss of grain and macroblocking
- with less computing power.
*/
return localCommand
}
-/**
- * A preset optimised for a stillimage audio video
- */
-async function presetStillImageWithAudio (
- command: ffmpeg.FfmpegCommand,
- resolution: VideoResolution,
- fps: number
-): Promise<ffmpeg.FfmpegCommand> {
- let localCommand = await presetH264VeryFast(command, resolution, fps)
- localCommand = localCommand.outputOption('-tune stillimage')
-
- return localCommand
-}
-
/**
* A toolbox to play with audio
*/
namespace audio {
- export const get = (option: ffmpeg.FfmpegCommand | string) => {
+ export const get = (option: string) => {
// without position, ffprobe considers the last input only
// we make it consider the first input only
// if you pass a file path to pos, then ffprobe acts on that file directly
return res({ absolutePath: data.format.filename })
}
- if (typeof option === 'string') {
- return ffmpeg.ffprobe(option, parseFfprobe)
- }
-
- return option.ffprobe(parseFfprobe)
+ return ffmpeg.ffprobe(option, parseFfprobe)
})
}
* As for the audio, quality '5' is the highest and ensures 96-112kbps/channel
* See https://trac.ffmpeg.org/wiki/Encode/AAC#fdk_vbr
*/
-async function presetH264 (command: ffmpeg.FfmpegCommand, resolution: VideoResolution, fps: number): Promise<ffmpeg.FfmpegCommand> {
+async function presetH264 (command: ffmpeg.FfmpegCommand, input: string, resolution: VideoResolution, fps?: number) {
let localCommand = command
.format('mp4')
.videoCodec('libx264')
.outputOption('-map_metadata -1') // strip all metadata
.outputOption('-movflags faststart')
- const parsedAudio = await audio.get(localCommand)
+ const parsedAudio = await audio.get(input)
if (!parsedAudio.audioStream) {
localCommand = localCommand.noAudio()
.audioCodec('libfdk_aac')
.audioQuality(5)
} else {
- // we try to reduce the ceiling bitrate by making rough correspondances of bitrates
+ // we try to reduce the ceiling bitrate by making rough matches of bitrates
// of course this is far from perfect, but it might save some space in the end
+ localCommand = localCommand.audioCodec('aac')
+
const audioCodecName = parsedAudio.audioStream[ 'codec_name' ]
- let bitrate: number
- if (audio.bitrate[ audioCodecName ]) {
- localCommand = localCommand.audioCodec('aac')
- bitrate = audio.bitrate[ audioCodecName ](parsedAudio.audioStream[ 'bit_rate' ])
+ if (audio.bitrate[ audioCodecName ]) {
+ const bitrate = audio.bitrate[ audioCodecName ](parsedAudio.audioStream[ 'bit_rate' ])
if (bitrate !== undefined && bitrate !== -1) localCommand = localCommand.audioBitrate(bitrate)
}
}
- // Constrained Encoding (VBV)
- // https://slhck.info/video/2017/03/01/rate-control.html
- // https://trac.ffmpeg.org/wiki/Limiting%20the%20output%20bitrate
- const targetBitrate = getTargetBitrate(resolution, fps, VIDEO_TRANSCODING_FPS)
- localCommand = localCommand.outputOptions([`-maxrate ${ targetBitrate }`, `-bufsize ${ targetBitrate * 2 }`])
-
- // Keyframe interval of 2 seconds for faster seeking and resolution switching.
- // https://streaminglearningcenter.com/blogs/whats-the-right-keyframe-interval.html
- // https://superuser.com/a/908325
- localCommand = localCommand.outputOption(`-g ${ fps * 2 }`)
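+ // fps may now be undefined (e.g. for the audio-merge preset): skip rate control and keyframe options in that case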
+ if (fps) {
+ // Constrained Encoding (VBV)
+ // https://slhck.info/video/2017/03/01/rate-control.html
+ // https://trac.ffmpeg.org/wiki/Limiting%20the%20output%20bitrate
+ const targetBitrate = getTargetBitrate(resolution, fps, VIDEO_TRANSCODING_FPS)
+ localCommand = localCommand.outputOptions([ `-maxrate ${targetBitrate}`, `-bufsize ${targetBitrate * 2}` ])
+
+ // Keyframe interval of 2 seconds for faster seeking and resolution switching.
+ // https://streaminglearningcenter.com/blogs/whats-the-right-keyframe-interval.html
+ // https://superuser.com/a/908325
+ localCommand = localCommand.outputOption(`-g ${fps * 2}`)
+ }
return localCommand
}
TRANSCODING: {
get ENABLED () { return config.get<boolean>('transcoding.enabled') },
get ALLOW_ADDITIONAL_EXTENSIONS () { return config.get<boolean>('transcoding.allow_additional_extensions') },
+ get ALLOW_AUDIO_FILES () { return config.get<boolean>('transcoding.allow_audio_files') },
get THREADS () { return config.get<number>('transcoding.threads') },
RESOLUTIONS: {
get '240p' () { return config.get<boolean>('transcoding.resolutions.240p') },
import { join } from 'path'
-import { JobType, VideoRateType, VideoState } from '../../shared/models'
+import { JobType, VideoRateType, VideoResolution, VideoState } from '../../shared/models'
import { ActivityPubActorType } from '../../shared/models/activitypub'
import { FollowState } from '../../shared/models/actors'
import { VideoAbuseState, VideoImportState, VideoPrivacy, VideoTranscodingFPS } from '../../shared/models/videos'
// Do not use barrels, remain constants as independent as possible
-import { isTestInstance, sanitizeHost, sanitizeUrl } from '../helpers/core-utils'
+import { isTestInstance, sanitizeHost, sanitizeUrl, root } from '../helpers/core-utils'
import { NSFWPolicyType } from '../../shared/models/videos/nsfw-policy.type'
import { invert } from 'lodash'
import { CronRepeatOptions, EveryRepeatOptions } from 'bull'
max: 2 * 1024 * 1024 // 2MB
}
},
- EXTNAME: buildVideosExtname(),
+ EXTNAME: [] as string[],
INFO_HASH: { min: 40, max: 40 }, // Length; the info hash is 20 bytes, but we represent it in hexadecimal, so 20 * 2
DURATION: { min: 0 }, // Number
TAGS: { min: 0, max: 5 }, // Number of total tags
KEEP_ORIGIN_FPS_RESOLUTION_MIN: 720 // We keep the original FPS on high resolutions (720 minimum)
}
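+// Resolution assigned to the video generated from an audio upload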
+const DEFAULT_AUDIO_RESOLUTION = VideoResolution.H_480P
+
const VIDEO_RATE_TYPES: { [ id: string ]: VideoRateType } = {
LIKE: 'like',
DISLIKE: 'dislike'
}
const MIMETYPES = {
+ AUDIO: {
+ MIMETYPE_EXT: {
+ 'audio/mpeg': '.mp3',
+ 'audio/mp3': '.mp3',
+ 'application/ogg': '.ogg',
+ 'audio/ogg': '.ogg',
+ 'audio/flac': '.flac'
+ },
+ EXT_MIMETYPE: null as { [ id: string ]: string }
+ },
VIDEO: {
- MIMETYPE_EXT: buildVideoMimetypeExt(),
+ MIMETYPE_EXT: null as { [ id: string ]: string },
EXT_MIMETYPE: null as { [ id: string ]: string }
},
IMAGE: {
}
}
}
-MIMETYPES.VIDEO.EXT_MIMETYPE = invert(MIMETYPES.VIDEO.MIMETYPE_EXT)
+MIMETYPES.AUDIO.EXT_MIMETYPE = invert(MIMETYPES.AUDIO.MIMETYPE_EXT)
// ---------------------------------------------------------------------------
COLLECTION_ITEMS_PER_PAGE: 10,
FETCH_PAGE_LIMIT: 100,
URL_MIME_TYPES: {
- VIDEO: Object.keys(MIMETYPES.VIDEO.MIMETYPE_EXT),
+ VIDEO: [] as string[],
TORRENT: [ 'application/x-bittorrent' ],
MAGNET: [ 'application/x-bittorrent;x-scheme-handler/magnet' ]
},
const ACCEPT_HEADERS = [ 'html', 'application/json' ].concat(ACTIVITY_PUB.POTENTIAL_ACCEPT_HEADERS)
+const ASSETS_PATH = {
+ DEFAULT_AUDIO_BACKGROUND: join(root(), 'server', 'assets', 'default-audio-background.jpg')
+}
+
// ---------------------------------------------------------------------------
const CUSTOM_HTML_TAG_COMMENTS = {
}
updateWebserverUrls()
+updateWebserverConfig()
registerConfigChangedHandler(() => {
updateWebserverUrls()
RATES_LIMIT,
MIMETYPES,
CRAWL_REQUEST_CONCURRENCY,
+ DEFAULT_AUDIO_RESOLUTION,
JOB_COMPLETED_LIFETIME,
HTTP_SIGNATURE,
VIDEO_IMPORT_STATES,
VIDEO_VIEW_LIFETIME,
CONTACT_FORM_LIFETIME,
VIDEO_PLAYLIST_PRIVACIES,
+ ASSETS_PATH,
loadLanguages,
buildLanguages
}
'video/mp4': '.mp4'
}
- if (CONFIG.TRANSCODING.ENABLED && CONFIG.TRANSCODING.ALLOW_ADDITIONAL_EXTENSIONS) {
- Object.assign(data, {
- 'video/quicktime': '.mov',
- 'video/x-msvideo': '.avi',
- 'video/x-flv': '.flv',
- 'video/x-matroska': '.mkv',
- 'application/octet-stream': '.mkv',
- 'video/avi': '.avi'
- })
+ if (CONFIG.TRANSCODING.ENABLED) {
+ if (CONFIG.TRANSCODING.ALLOW_ADDITIONAL_EXTENSIONS) {
+ Object.assign(data, {
+ 'video/quicktime': '.mov',
+ 'video/x-msvideo': '.avi',
+ 'video/x-flv': '.flv',
+ 'video/x-matroska': '.mkv',
+ 'application/octet-stream': '.mkv',
+ 'video/avi': '.avi'
+ })
+ }
+
+ if (CONFIG.TRANSCODING.ALLOW_AUDIO_FILES) {
+ Object.assign(data, MIMETYPES.AUDIO.MIMETYPE_EXT)
+ }
}
return data
}
function updateWebserverConfig () {
- CONSTRAINTS_FIELDS.VIDEOS.EXTNAME = buildVideosExtname()
-
MIMETYPES.VIDEO.MIMETYPE_EXT = buildVideoMimetypeExt()
MIMETYPES.VIDEO.EXT_MIMETYPE = invert(MIMETYPES.VIDEO.MIMETYPE_EXT)
+ ACTIVITY_PUB.URL_MIME_TYPES.VIDEO = Object.keys(MIMETYPES.VIDEO.MIMETYPE_EXT)
+
+ CONSTRAINTS_FIELDS.VIDEOS.EXTNAME = buildVideosExtname()
}
function buildVideosExtname () {
- return CONFIG.TRANSCODING.ENABLED && CONFIG.TRANSCODING.ALLOW_ADDITIONAL_EXTENSIONS
- ? [ '.mp4', '.ogv', '.webm', '.mkv', '.mov', '.avi', '.flv' ]
- : [ '.mp4', '.ogv', '.webm' ]
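+ // Allowed extensions are now derived from the mimetype map, which already reflects allow_additional_extensions and allow_audio_files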
+ return Object.keys(MIMETYPES.VIDEO.EXT_MIMETYPE)
}
function loadLanguages () {
const video = await VideoModel.loadByUUIDWithFile(videoUUID)
if (!video) return undefined
- if (video.isOwned()) return { isOwned: true, path: join(CONFIG.STORAGE.PREVIEWS_DIR, video.getPreview().filename) }
+ if (video.isOwned()) return { isOwned: true, path: video.getPreview().getPath() }
return this.loadRemoteFile(videoUUID)
}
import * as Bull from 'bull'
import { logger } from '../../../helpers/logger'
import { VideoModel } from '../../../models/video/video'
-import { publishVideoIfNeeded } from './video-transcoding'
+import { publishNewResolutionIfNeeded } from './video-transcoding'
import { getVideoFileFPS, getVideoFileResolution } from '../../../helpers/ffmpeg-utils'
import { copy, stat } from 'fs-extra'
import { VideoFileModel } from '../../../models/video/video-file'
await updateVideoFile(video, payload.filePath)
- await publishVideoIfNeeded(video)
+ await publishNewResolutionIfNeeded(video)
return video
}
if (videoImportUpdated.Video.state === VideoState.TO_TRANSCODE) {
// Use the UUID because we don't have an auto-incremented id for now
const dataInput = {
+ type: 'optimize' as 'optimize',
videoUUID: videoImportUpdated.Video.uuid,
isNewVideo: true
}
import { sequelizeTypescript } from '../../../initializers'
import * as Bluebird from 'bluebird'
import { computeResolutionsToTranscode } from '../../../helpers/ffmpeg-utils'
-import { generateHlsPlaylist, optimizeVideofile, transcodeOriginalVideofile } from '../../video-transcoding'
+import { generateHlsPlaylist, optimizeVideofile, transcodeOriginalVideofile, mergeAudioVideofile } from '../../video-transcoding'
import { Notifier } from '../../notifier'
import { CONFIG } from '../../../initializers/config'
-export type VideoTranscodingPayload = {
+interface BaseTranscodingPayload {
videoUUID: string
- resolution?: VideoResolution
isNewVideo?: boolean
+}
+
+interface HLSTranscodingPayload extends BaseTranscodingPayload {
+ type: 'hls'
+ isPortraitMode?: boolean
+ resolution: VideoResolution
+}
+
+interface NewResolutionTranscodingPayload extends BaseTranscodingPayload {
+ type: 'new-resolution'
isPortraitMode?: boolean
- generateHlsPlaylist?: boolean
+ resolution: VideoResolution
+}
+
+interface MergeAudioTranscodingPayload extends BaseTranscodingPayload {
+ type: 'merge-audio'
+ resolution: VideoResolution
+}
+
+interface OptimizeTranscodingPayload extends BaseTranscodingPayload {
+ type: 'optimize'
}
+export type VideoTranscodingPayload = HLSTranscodingPayload | NewResolutionTranscodingPayload
+ | OptimizeTranscodingPayload | MergeAudioTranscodingPayload
+
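+// e.g. the videos controller now enqueues { type: 'merge-audio', resolution, videoUUID, isNewVideo: true } for audio uploads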
async function processVideoTranscoding (job: Bull.Job) {
const payload = job.data as VideoTranscodingPayload
logger.info('Processing video file in job %d.', job.id)
return undefined
}
- if (payload.generateHlsPlaylist) {
+ if (payload.type === 'hls') {
await generateHlsPlaylist(video, payload.resolution, payload.isPortraitMode || false)
await retryTransactionWrapper(onHlsPlaylistGenerationSuccess, video)
- } else if (payload.resolution) { // Transcoding in other resolution
+ } else if (payload.type === 'new-resolution') {
await transcodeOriginalVideofile(video, payload.resolution, payload.isPortraitMode || false)
- await retryTransactionWrapper(publishVideoIfNeeded, video, payload)
+ await retryTransactionWrapper(publishNewResolutionIfNeeded, video, payload)
+ } else if (payload.type === 'merge-audio') {
+ await mergeAudioVideofile(video, payload.resolution)
+
+ await retryTransactionWrapper(publishNewResolutionIfNeeded, video, payload)
} else {
await optimizeVideofile(video)
})
}
-async function publishVideoIfNeeded (video: VideoModel, payload?: VideoTranscodingPayload) {
+async function publishNewResolutionIfNeeded (video: VideoModel, payload?: NewResolutionTranscodingPayload | MergeAudioTranscodingPayload) {
const { videoDatabase, videoPublished } = await sequelizeTypescript.transaction(async t => {
// Maybe the video changed in database, refresh it
let videoDatabase = await VideoModel.loadAndPopulateAccountAndServerAndTags(video.uuid, t)
await createHlsJobIfEnabled(payload)
}
-async function onVideoFileOptimizerSuccess (videoArg: VideoModel, payload: VideoTranscodingPayload) {
+async function onVideoFileOptimizerSuccess (videoArg: VideoModel, payload: OptimizeTranscodingPayload) {
if (videoArg === undefined) return undefined
// Outside the transaction (IO on disk)
for (const resolution of resolutionsEnabled) {
const dataInput = {
+ type: 'new-resolution' as 'new-resolution',
videoUUID: videoDatabase.uuid,
resolution
}
if (payload.isNewVideo) Notifier.Instance.notifyOnNewVideo(videoDatabase)
if (videoPublished) Notifier.Instance.notifyOnVideoPublishedAfterTranscoding(videoDatabase)
- await createHlsJobIfEnabled(Object.assign({}, payload, { resolution: videoDatabase.getOriginalFile().resolution }))
+ const hlsPayload = Object.assign({}, payload, { resolution: videoDatabase.getOriginalFile().resolution })
+ await createHlsJobIfEnabled(hlsPayload)
}
// ---------------------------------------------------------------------------
export {
processVideoTranscoding,
- publishVideoIfNeeded
+ publishNewResolutionIfNeeded
}
// ---------------------------------------------------------------------------
-function createHlsJobIfEnabled (payload?: VideoTranscodingPayload) {
+function createHlsJobIfEnabled (payload?: { videoUUID: string, resolution: number, isPortraitMode?: boolean }) {
// Generate HLS playlist?
if (payload && CONFIG.TRANSCODING.HLS.ENABLED) {
const hlsTranscodingPayload = {
+ type: 'hls' as 'hls',
videoUUID: payload.videoUUID,
resolution: payload.resolution,
- isPortraitMode: payload.isPortraitMode,
-
- generateHlsPlaylist: true
+ isPortraitMode: payload.isPortraitMode
}
return JobQueue.Instance.createJob({ type: 'video-transcoding', payload: hlsTranscodingPayload })
import { VideoFileModel } from '../models/video/video-file'
import { generateImageFromVideoFile } from '../helpers/ffmpeg-utils'
import { CONFIG } from '../initializers/config'
-import { PREVIEWS_SIZE, THUMBNAILS_SIZE } from '../initializers/constants'
+import { PREVIEWS_SIZE, THUMBNAILS_SIZE, ASSETS_PATH } from '../initializers/constants'
import { VideoModel } from '../models/video/video'
import { ThumbnailModel } from '../models/video/thumbnail'
import { ThumbnailType } from '../../shared/models/videos/thumbnail.type'
function generateVideoMiniature (video: VideoModel, videoFile: VideoFileModel, type: ThumbnailType) {
const input = video.getVideoFilePath(videoFile)
- const { filename, basePath, height, width, existingThumbnail } = buildMetadataFromVideo(video, type)
- const thumbnailCreator = () => generateImageFromVideoFile(input, basePath, filename, { height, width })
+ const { filename, basePath, height, width, existingThumbnail, outputPath } = buildMetadataFromVideo(video, type)
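+ // Audio files have no frame to capture, so use the bundled default audio background image instead of a video frame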
+ const thumbnailCreator = videoFile.isAudio()
+ ? () => processImage(ASSETS_PATH.DEFAULT_AUDIO_BACKGROUND, outputPath, { width, height }, true)
+ : () => generateImageFromVideoFile(input, basePath, filename, { height, width })
return createThumbnailFromFunction({ thumbnailCreator, filename, height, width, type, existingThumbnail })
}
import { HLS_STREAMING_PLAYLIST_DIRECTORY, P2P_MEDIA_LOADER_PEER_VERSION, WEBSERVER } from '../initializers/constants'
import { join } from 'path'
-import { getVideoFileFPS, transcode, canDoQuickTranscode } from '../helpers/ffmpeg-utils'
+import { canDoQuickTranscode, getVideoFileFPS, transcode, TranscodeOptions, TranscodeOptionsType } from '../helpers/ffmpeg-utils'
import { ensureDir, move, remove, stat } from 'fs-extra'
import { logger } from '../helpers/logger'
import { VideoResolution } from '../../shared/models/videos'
const videoInputPath = join(videosDirectory, video.getVideoFilename(inputVideoFile))
const videoTranscodedPath = join(transcodeDirectory, video.id + '-transcoded' + newExtname)
- const doQuickTranscode = await(canDoQuickTranscode(videoInputPath))
+ const transcodeType: TranscodeOptionsType = await canDoQuickTranscode(videoInputPath)
+ ? 'quick-transcode'
+ : 'video'
- const transcodeOptions = {
+ const transcodeOptions: TranscodeOptions = {
+ type: transcodeType as any, // FIXME: typing issue
inputPath: videoInputPath,
outputPath: videoTranscodedPath,
- resolution: inputVideoFile.resolution,
- doQuickTranscode
+ resolution: inputVideoFile.resolution
}
// Could be very long!
await remove(videoInputPath)
// Important to do this before getVideoFilename() to take into account the new file extension
- inputVideoFile.set('extname', newExtname)
-
- const stats = await stat(videoTranscodedPath)
- const fps = await getVideoFileFPS(videoTranscodedPath)
+ inputVideoFile.extname = newExtname
const videoOutputPath = video.getVideoFilePath(inputVideoFile)
- await move(videoTranscodedPath, videoOutputPath)
- inputVideoFile.set('size', stats.size)
- inputVideoFile.set('fps', fps)
-
- await video.createTorrentAndSetInfoHash(inputVideoFile)
- await inputVideoFile.save()
+ await onVideoFileTranscoding(video, inputVideoFile, videoTranscodedPath, videoOutputPath)
} catch (err) {
// Auto destruction...
video.destroy().catch(err => logger.error('Cannot destruct video after transcoding failure.', { err }))
const videoTranscodedPath = join(transcodeDirectory, video.getVideoFilename(newVideoFile))
const transcodeOptions = {
+ type: 'video' as 'video',
inputPath: videoInputPath,
outputPath: videoTranscodedPath,
resolution,
await transcode(transcodeOptions)
- const stats = await stat(videoTranscodedPath)
- const fps = await getVideoFileFPS(videoTranscodedPath)
+ return onVideoFileTranscoding(video, newVideoFile, videoTranscodedPath, videoOutputPath)
+}
+
+async function mergeAudioVideofile (video: VideoModel, resolution: VideoResolution) {
+ const videosDirectory = CONFIG.STORAGE.VIDEOS_DIR
+ const transcodeDirectory = CONFIG.STORAGE.TMP_DIR
+ const newExtname = '.mp4'
+
+ const inputVideoFile = video.getOriginalFile()
- await move(videoTranscodedPath, videoOutputPath)
+ const audioInputPath = join(videosDirectory, video.getVideoFilename(video.getOriginalFile()))
+ const videoTranscodedPath = join(transcodeDirectory, video.id + '-transcoded' + newExtname)
- newVideoFile.set('size', stats.size)
- newVideoFile.set('fps', fps)
+ const transcodeOptions = {
+ type: 'merge-audio' as 'merge-audio',
+ inputPath: video.getPreview().getPath(),
+ outputPath: videoTranscodedPath,
+ audioPath: audioInputPath,
+ resolution
+ }
- await video.createTorrentAndSetInfoHash(newVideoFile)
+ await transcode(transcodeOptions)
- await newVideoFile.save()
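+ // The uploaded audio file is not kept: the generated mp4 replaces the original file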
+ await remove(audioInputPath)
- video.VideoFiles.push(newVideoFile)
+ // Important to do this before getVideoFilename() to take into account the new file extension
+ inputVideoFile.extname = newExtname
+
+ const videoOutputPath = video.getVideoFilePath(inputVideoFile)
+
+ return onVideoFileTranscoding(video, inputVideoFile, videoTranscodedPath, videoOutputPath)
}
async function generateHlsPlaylist (video: VideoModel, resolution: VideoResolution, isPortraitMode: boolean) {
const outputPath = join(baseHlsDirectory, VideoStreamingPlaylistModel.getHlsPlaylistFilename(resolution))
const transcodeOptions = {
+ type: 'hls' as 'hls',
inputPath: videoInputPath,
outputPath,
resolution,
})
}
+// ---------------------------------------------------------------------------
+
export {
generateHlsPlaylist,
optimizeVideofile,
- transcodeOriginalVideofile
+ transcodeOriginalVideofile,
+ mergeAudioVideofile
+}
+
+// ---------------------------------------------------------------------------
+
+async function onVideoFileTranscoding (video: VideoModel, videoFile: VideoFileModel, transcodingPath: string, outputPath: string) {
+ const stats = await stat(transcodingPath)
+ const fps = await getVideoFileFPS(transcodingPath)
+
+ await move(transcodingPath, outputPath)
+
+ videoFile.set('size', stats.size)
+ videoFile.set('fps', fps)
+
+ await video.createTorrentAndSetInfoHash(videoFile)
+
+ const updatedVideoFile = await videoFile.save()
+
+ // Add it if this is a newly created file
+ if (video.VideoFiles.some(f => f.id === videoFile.id) === false) {
+ video.VideoFiles.push(updatedVideoFile)
+ }
+
+ return video
}
return WEBSERVER.URL + staticPath + this.filename
}
- removeThumbnail () {
+ getPath () {
const directory = ThumbnailModel.types[this.type].directory
- const thumbnailPath = join(directory, this.filename)
+ return join(directory, this.filename)
+ }
- return remove(thumbnailPath)
+ removeThumbnail () {
+ return remove(this.getPath())
}
}
import { VideoRedundancyModel } from '../redundancy/video-redundancy'
import { VideoStreamingPlaylistModel } from './video-streaming-playlist'
import { FindOptions, QueryTypes, Transaction } from 'sequelize'
+import { MIMETYPES } from '../../initializers/constants'
@Table({
tableName: 'videoFile',
}))
}
+ isAudio () {
+ return !!MIMETYPES.AUDIO.EXT_MIMETYPE[this.extname]
+ }
+
hasSameUniqueKeysThan (other: VideoFileModel) {
return this.fps === other.fps &&
this.resolution === other.resolution &&
transcoding: {
enabled: true,
allowAdditionalExtensions: true,
+ allowAudioFiles: true,
threads: 1,
resolutions: {
'240p': false,
expect(data.user.videoQuotaDaily).to.equal(-1)
expect(data.transcoding.enabled).to.be.false
expect(data.transcoding.allowAdditionalExtensions).to.be.false
+ expect(data.transcoding.allowAudioFiles).to.be.false
expect(data.transcoding.threads).to.equal(2)
expect(data.transcoding.resolutions['240p']).to.be.true
expect(data.transcoding.resolutions['360p']).to.be.true
expect(data.transcoding.enabled).to.be.true
expect(data.transcoding.threads).to.equal(1)
expect(data.transcoding.allowAdditionalExtensions).to.be.true
+ expect(data.transcoding.allowAudioFiles).to.be.true
expect(data.transcoding.resolutions['240p']).to.be.false
expect(data.transcoding.resolutions['360p']).to.be.true
expect(data.transcoding.resolutions['480p']).to.be.true
transcoding: {
enabled: true,
allowAdditionalExtensions: true,
+ allowAudioFiles: true,
threads: 1,
resolutions: {
'240p': false,
transcoding: {
enabled: true,
allowAdditionalExtensions: true,
+ allowAudioFiles: true,
threads: 1,
resolutions: {
'240p': false,
transcoding: {
enabled: boolean
allowAdditionalExtensions: boolean
+ allowAudioFiles: boolean
threads: number
resolutions: {
'240p': boolean