1 // From https://github.com/MinEduTDF/idb-chunk-store
// We use a temporary IndexedDB database (all data is removed on destroy) to avoid RAM issues
3 // Thanks @santiagogil and @Feross
5 import { EventEmitter } from 'events'
6 import Dexie from 'dexie'
8 class ChunkDatabase extends Dexie {
9 chunks: Dexie.Table<{ id: number, buf: Buffer }, number>
11 constructor (dbname: string) {
14 this.version(1).stores({
20 class ExpirationDatabase extends Dexie {
21 databases: Dexie.Table<{ name: string, expiration: number }, number>
24 super('webtorrent-expiration')
26 this.version(1).stores({
27 databases: 'name,expiration'
// Chunk store backed by a temporary IndexedDB database. Implements the
// webtorrent/abstract-chunk-store contract (put/get/close/destroy).
export class PeertubeChunkStore extends EventEmitter {
  private static readonly BUFFERING_PUT_MS = 1000
  private static readonly CLEANER_INTERVAL_MS = 1000 * 60 // 1 minute
  private static readonly CLEANER_EXPIRATION_MS = 1000 * 60 * 5 // 5 minutes

  // Public: the chunk-store contract exposes chunkLength to callers.
  // Restored declaration — the constructor assigns this.chunkLength but the field was missing.
  chunkLength: number

  // Writes buffered between bulk inserts (see put())
  private pendingPut: { id: number, buf: Buffer, cb: (err?: Error) => void }[] = []
  // If the store is full
  private memoryChunks: { [ id: number ]: Buffer | true } = {}
  private databaseName: string
  private putBulkTimeout: any
  private cleanerInterval: any
  private db: ChunkDatabase
  private expirationDB: ExpirationDatabase
  private readonly length: number
  private readonly lastChunkLength: number
  private readonly lastChunkIndex: number
51 constructor (chunkLength: number, opts: any) {
54 this.databaseName = 'webtorrent-chunks-'
57 if (opts.torrent?.infoHash) this.databaseName += opts.torrent.infoHash
58 else this.databaseName += '-default'
60 this.setMaxListeners(100)
62 this.chunkLength = Number(chunkLength)
63 if (!this.chunkLength) throw new Error('First argument must be a chunk length')
65 this.length = Number(opts.length) || Infinity
67 if (this.length !== Infinity) {
68 this.lastChunkLength = (this.length % this.chunkLength) || this.chunkLength
69 this.lastChunkIndex = Math.ceil(this.length / this.chunkLength) - 1
72 this.db = new ChunkDatabase(this.databaseName)
73 // Track databases that expired
74 this.expirationDB = new ExpirationDatabase()
79 put (index: number, buf: Buffer, cb: (err?: Error) => void) {
80 const isLastChunk = (index === this.lastChunkIndex)
81 if (isLastChunk && buf.length !== this.lastChunkLength) {
82 return this.nextTick(cb, new Error('Last chunk length must be ' + this.lastChunkLength))
84 if (!isLastChunk && buf.length !== this.chunkLength) {
85 return this.nextTick(cb, new Error('Chunk length must be ' + this.chunkLength))
88 // Specify we have this chunk
89 this.memoryChunks[index] = true
91 // Add it to the pending put
92 this.pendingPut.push({ id: index, buf, cb })
93 // If it's already planned, return
94 if (this.putBulkTimeout) return
96 // Plan a future bulk insert
97 this.putBulkTimeout = setTimeout(async () => {
98 const processing = this.pendingPut
100 this.putBulkTimeout = undefined
103 await this.db.transaction('rw', this.db.chunks, () => {
104 return this.db.chunks.bulkPut(processing.map(p => ({ id: p.id, buf: p.buf })))
107 console.log('Cannot bulk insert chunks. Store them in memory.', { err })
109 processing.forEach(p => {
110 this.memoryChunks[p.id] = p.buf
113 processing.forEach(p => p.cb())
115 }, PeertubeChunkStore.BUFFERING_PUT_MS)
118 get (index: number, opts: any, cb: (err?: Error, buf?: Buffer) => void): void {
119 if (typeof opts === 'function') return this.get(index, null, opts)
121 // IndexDB could be slow, use our memory index first
122 const memoryChunk = this.memoryChunks[index]
123 if (memoryChunk === undefined) {
124 const err = new Error('Chunk not found') as any
125 err['notFound'] = true
127 return process.nextTick(() => cb(err))
131 if (memoryChunk !== true) return cb(null, memoryChunk)
134 this.db.transaction('r', this.db.chunks, async () => {
135 const result = await this.db.chunks.get({ id: index })
136 if (result === undefined) return cb(null, Buffer.alloc(0))
138 const buf = result.buf
139 if (!opts) return this.nextTick(cb, null, buf)
141 const offset = opts.offset || 0
142 const len = opts.length || (buf.length - offset)
143 return cb(null, buf.slice(offset, len + offset))
151 close (cb: (err?: Error) => void) {
152 return this.destroy(cb)
155 async destroy (cb: (err?: Error) => void) {
157 if (this.pendingPut) {
158 clearTimeout(this.putBulkTimeout)
159 this.pendingPut = null
161 if (this.cleanerInterval) {
162 clearInterval(this.cleanerInterval)
163 this.cleanerInterval = null
169 await this.dropDatabase(this.databaseName)
172 if (this.expirationDB) {
173 this.expirationDB.close()
174 this.expirationDB = null
179 console.error('Cannot destroy peertube chunk store.', err)
184 private runCleaner () {
185 this.checkExpiration()
187 this.cleanerInterval = setInterval(() => {
188 this.checkExpiration()
189 }, PeertubeChunkStore.CLEANER_INTERVAL_MS)
192 private async checkExpiration () {
193 let databasesToDeleteInfo: { name: string }[] = []
196 await this.expirationDB.transaction('rw', this.expirationDB.databases, async () => {
197 // Update our database expiration since we are alive
198 await this.expirationDB.databases.put({
199 name: this.databaseName,
200 expiration: new Date().getTime() + PeertubeChunkStore.CLEANER_EXPIRATION_MS
203 const now = new Date().getTime()
204 databasesToDeleteInfo = await this.expirationDB.databases.where('expiration').below(now).toArray()
207 console.error('Cannot update expiration of fetch expired databases.', err)
210 for (const databaseToDeleteInfo of databasesToDeleteInfo) {
211 await this.dropDatabase(databaseToDeleteInfo.name)
215 private async dropDatabase (databaseName: string) {
216 const dbToDelete = new ChunkDatabase(databaseName)
217 console.log('Destroying IndexDB database %s.', databaseName)
220 await dbToDelete.delete()
222 await this.expirationDB.transaction('rw', this.expirationDB.databases, () => {
223 return this.expirationDB.databases.where({ name: databaseName }).delete()
226 console.error('Cannot delete %s.', databaseName, err)
230 private nextTick <T> (cb: (err?: Error, val?: T) => void, err: Error, val?: T) {
231 process.nextTick(() => cb(err, val), undefined)