This commit is contained in:
Matt Manning 2026-02-22 20:00:01 -07:00 committed by GitHub
commit 8255c3d0c7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 562 additions and 145 deletions

View file

@ -20,7 +20,7 @@
<div v-if="comicMetadata" class="absolute top-0 left-16 sm:left-20 bg-bg text-gray-100 border-b border-l border-r border-gray-400 hover:bg-black-200 cursor-pointer rounded-b-md w-10 h-9 flex items-center justify-center text-center z-20" @mousedown.prevent @click.stop.prevent="clickShowInfoMenu">
<span class="material-symbols text-xl">more</span>
</div>
<a v-if="pages && numPages" :href="mainImg" :download="pages[page - 1]" class="absolute top-0 bg-bg text-gray-100 border-b border-l border-r border-gray-400 hover:bg-black-200 cursor-pointer rounded-b-md w-10 h-9 flex items-center justify-center text-center z-20" :class="comicMetadata ? 'left-28 sm:left-32' : 'left-16 sm:left-20'">
<a v-if="pages && numPages && mainImg" :href="currentPageUrl" :download="pages[page - 1]" class="absolute top-0 bg-bg text-gray-100 border-b border-l border-r border-gray-400 hover:bg-black-200 cursor-pointer rounded-b-md w-10 h-9 flex items-center justify-center text-center z-20" :class="comicMetadata ? 'left-28 sm:left-32' : 'left-16 sm:left-20'">
<span class="material-symbols text-xl">download</span>
</a>
@ -45,7 +45,7 @@
</div>
<div ref="imageContainer" class="w-full h-full relative overflow-auto">
<div class="h-full flex" :class="scale > 100 ? '' : 'justify-center'">
<img v-if="mainImg" :style="{ minWidth: scale + '%', width: scale + '%' }" :src="mainImg" class="object-contain m-auto" />
<img v-if="mainImg" :style="{ minWidth: scale + '%', width: scale + '%' }" :src="mainImg" class="object-contain m-auto" @load="onImageLoad" @error="onImageError" />
</div>
</div>
<div v-show="loading" class="w-full h-full absolute top-0 left-0 flex items-center justify-center z-10">
@ -57,17 +57,11 @@
<script>
import Path from 'path'
import { Archive } from 'libarchive.js/main.js'
import { CompressedFile } from 'libarchive.js/src/compressed-file'
// This is % with respect to the screen width
const MAX_SCALE = 400
const MIN_SCALE = 10
Archive.init({
workerUrl: '/libarchive/worker-bundle.js'
})
export default {
props: {
libraryItem: {
@ -82,7 +76,7 @@ export default {
return {
loading: false,
pages: null,
filesObject: null,
fileIno: null,
mainImg: null,
page: 0,
numPages: 0,
@ -92,14 +86,16 @@ export default {
loadTimeout: null,
loadedFirstPage: false,
comicMetadata: null,
scale: 80
scale: 80,
// Preload adjacent pages
preloadedPages: new Map()
}
},
watch: {
url: {
libraryItemId: {
immediate: true,
handler() {
this.extract()
this.loadComicMetadata()
}
}
},
@ -107,11 +103,18 @@ export default {
libraryItemId() {
return this.libraryItem?.id
},
ebookUrl() {
comicPagesUrl() {
if (this.fileId) {
return `/api/items/${this.libraryItemId}/ebook/${this.fileId}`
return `/api/items/${this.libraryItemId}/comic-pages/${this.fileId}`
}
return `/api/items/${this.libraryItemId}/ebook`
return `/api/items/${this.libraryItemId}/comic-pages`
},
currentPageUrl() {
if (!this.libraryItemId || !this.page) return null
if (this.fileId) {
return `/api/items/${this.libraryItemId}/comic-page/${this.page}/${this.fileId}`
}
return `/api/items/${this.libraryItemId}/comic-page/${this.page}`
},
comicMetadataKeys() {
return this.comicMetadata ? Object.keys(this.comicMetadata) : []
@ -136,12 +139,13 @@ export default {
cleanedPageNames() {
return (
this.pages?.map((p) => {
if (p.length > 50) {
let firstHalf = p.slice(0, 22)
let lastHalf = p.slice(p.length - 23)
const filename = typeof p === 'object' ? p.filename : p
if (filename.length > 50) {
let firstHalf = filename.slice(0, 22)
let lastHalf = filename.slice(filename.length - 23)
return `${firstHalf} ... ${lastHalf}`
}
return p
return filename
}) || []
)
},
@ -192,157 +196,118 @@ export default {
if (!this.canGoPrev) return
this.setPage(this.page - 1)
},
setPage(page) {
if (page <= 0 || page > this.numPages) {
getPageUrl(pageNum) {
if (this.fileId) {
return `/api/items/${this.libraryItemId}/comic-page/${pageNum}/${this.fileId}`
}
return `/api/items/${this.libraryItemId}/comic-page/${pageNum}`
},
setPage(pageNum) {
if (pageNum <= 0 || pageNum > this.numPages) {
return
}
this.showPageMenu = false
this.showInfoMenu = false
const filename = this.pages[page - 1]
this.page = page
this.page = pageNum
this.updateProgress()
return this.extractFile(filename)
this.loadPage(pageNum)
// Preload adjacent pages
this.preloadAdjacentPages(pageNum)
},
setLoadTimeout() {
this.loadTimeout = setTimeout(() => {
this.loading = true
}, 150)
},
extractFile(filename) {
return new Promise(async (resolve) => {
this.setLoadTimeout()
var file = await this.filesObject[filename].extract()
var reader = new FileReader()
reader.onload = (e) => {
this.mainImg = e.target.result
this.loading = false
resolve()
}
reader.onerror = (e) => {
console.error(e)
this.$toast.error('Read page file failed')
this.loading = false
resolve()
}
reader.readAsDataURL(file)
loadPage(pageNum) {
this.setLoadTimeout()
// Check if already preloaded
const preloaded = this.preloadedPages.get(pageNum)
if (preloaded) {
this.mainImg = preloaded
this.loading = false
clearTimeout(this.loadTimeout)
})
},
async extract() {
this.loading = true
var buff = await this.$axios.$get(this.ebookUrl, {
responseType: 'blob'
})
const archive = await Archive.open(buff)
const originalFilesObject = await archive.getFilesObject()
// to support images in subfolders we need to flatten the object
// ref: https://github.com/advplyr/audiobookshelf/issues/811
this.filesObject = this.flattenFilesObject(originalFilesObject)
console.log('Extracted files object', this.filesObject)
var filenames = Object.keys(this.filesObject)
this.parseFilenames(filenames)
var xmlFile = filenames.find((f) => (Path.extname(f) || '').toLowerCase() === '.xml')
if (xmlFile) await this.extractXmlFile(xmlFile)
this.numPages = this.pages.length
// Calculate page menu size
const largestFilename = this.cleanedPageNames
.map((p) => p)
.sort((a, b) => a.length - b.length)
.pop()
const pEl = document.createElement('p')
pEl.innerText = largestFilename
pEl.style.fontSize = '0.875rem'
pEl.style.opacity = 0
pEl.style.position = 'absolute'
document.body.appendChild(pEl)
const textWidth = pEl.getBoundingClientRect()?.width
if (textWidth) {
this.pageMenuWidth = textWidth + (16 + 5 + 2 + 5)
return
}
pEl.remove()
if (this.pages.length) {
this.loading = false
const startPage = this.savedPage > 0 && this.savedPage <= this.numPages ? this.savedPage : 1
await this.setPage(startPage)
// Load from server
this.mainImg = this.getPageUrl(pageNum)
},
onImageLoad() {
this.loading = false
clearTimeout(this.loadTimeout)
if (!this.loadedFirstPage) {
this.loadedFirstPage = true
} else {
this.$toast.error('Unable to extract pages')
this.loading = false
}
},
flattenFilesObject(filesObject) {
const flattenObject = (obj, prefix = '') => {
var _obj = {}
for (const key in obj) {
const newKey = prefix ? prefix + '/' + key : key
if (obj[key] instanceof CompressedFile) {
_obj[newKey] = obj[key]
} else if (!key.startsWith('_') && typeof obj[key] === 'object' && !Array.isArray(obj[key])) {
_obj = {
..._obj,
...flattenObject(obj[key], newKey)
}
} else {
_obj[newKey] = obj[key]
onImageError() {
this.loading = false
clearTimeout(this.loadTimeout)
this.$toast.error('Failed to load page')
},
preloadAdjacentPages(currentPage) {
// Preload next 2 and previous 1 pages
const pagesToPreload = [currentPage + 1, currentPage + 2, currentPage - 1].filter(
(p) => p >= 1 && p <= this.numPages && !this.preloadedPages.has(p)
)
for (const pageNum of pagesToPreload) {
const img = new Image()
img.src = this.getPageUrl(pageNum)
img.onload = () => {
this.preloadedPages.set(pageNum, img.src)
// Limit cache size
if (this.preloadedPages.size > 10) {
const firstKey = this.preloadedPages.keys().next().value
this.preloadedPages.delete(firstKey)
}
}
return _obj
}
return flattenObject(filesObject)
},
async extractXmlFile(filename) {
console.log('extracting xml filename', filename)
async loadComicMetadata() {
if (!this.libraryItemId) return
this.loading = true
try {
var file = await this.filesObject[filename].extract()
var reader = new FileReader()
reader.onload = (e) => {
this.comicMetadata = this.$xmlToJson(e.target.result)
console.log('Metadata', this.comicMetadata)
const response = await this.$axios.$get(this.comicPagesUrl)
console.log('Comic metadata:', response)
this.fileIno = response.fileIno
this.pages = response.pages.map(p => p.filename)
this.numPages = response.numPages
// Calculate page menu size
const largestFilename = this.cleanedPageNames
.map((p) => p)
.sort((a, b) => a.length - b.length)
.pop()
if (largestFilename) {
const pEl = document.createElement('p')
pEl.innerText = largestFilename
pEl.style.fontSize = '0.875rem'
pEl.style.opacity = 0
pEl.style.position = 'absolute'
document.body.appendChild(pEl)
const textWidth = pEl.getBoundingClientRect()?.width
if (textWidth) {
this.pageMenuWidth = textWidth + (16 + 5 + 2 + 5)
}
pEl.remove()
}
reader.onerror = (e) => {
console.error(e)
if (this.numPages > 0) {
this.loading = false
const startPage = this.savedPage > 0 && this.savedPage <= this.numPages ? this.savedPage : 1
this.setPage(startPage)
} else {
this.$toast.error('Comic has no pages')
this.loading = false
}
reader.readAsText(file)
} catch (error) {
console.error(error)
console.error('Failed to load comic metadata:', error)
this.$toast.error('Failed to load comic')
this.loading = false
}
},
parseImageFilename(filename) {
var basename = Path.basename(filename, Path.extname(filename))
var numbersinpath = basename.match(/\d+/g)
if (!numbersinpath?.length) {
return {
index: -1,
filename
}
} else {
return {
index: Number(numbersinpath[numbersinpath.length - 1]),
filename
}
}
},
parseFilenames(filenames) {
const acceptableImages = ['.jpeg', '.jpg', '.png', '.webp']
var imageFiles = filenames.filter((f) => {
return acceptableImages.includes((Path.extname(f) || '').toLowerCase())
})
var imageFileObjs = imageFiles.map((img) => {
return this.parseImageFilename(img)
})
var imagesWithNum = imageFileObjs.filter((i) => i.index >= 0)
var orderedImages = imagesWithNum.sort((a, b) => a.index - b.index).map((i) => i.filename)
var noNumImages = imageFileObjs.filter((i) => i.index < 0)
orderedImages = orderedImages.concat(noNumImages.map((i) => i.filename))
this.pages = orderedImages
},
zoomIn() {
this.scale += 10
},
@ -372,6 +337,9 @@ export default {
prevButton.removeEventListener('wheel', this.scroll, { passive: false })
nextButton.removeEventListener('wheel', this.scroll, { passive: false })
// Clear preloaded pages
this.preloadedPages.clear()
}
}
</script>

View file

@ -18,7 +18,11 @@ const { escapeRegExp } = require('./utils')
class Auth {
constructor() {
const escapedRouterBasePath = escapeRegExp(global.RouterBasePath)
this.ignorePatterns = [new RegExp(`^(${escapedRouterBasePath}/api)?/items/[^/]+/cover$`), new RegExp(`^(${escapedRouterBasePath}/api)?/authors/[^/]+/image$`)]
this.ignorePatterns = [
new RegExp(`^(${escapedRouterBasePath}/api)?/items/[^/]+/cover$`),
new RegExp(`^(${escapedRouterBasePath}/api)?/authors/[^/]+/image$`),
new RegExp(`^(${escapedRouterBasePath}/api)?/items/[^/]+/comic-page/[0-9]+`)
]
/** @type {import('express-rate-limit').RateLimitRequestHandler} */
this.authRateLimiter = RateLimiterFactory.getAuthRateLimiter()

View file

@ -164,6 +164,8 @@ class Server {
await this.cleanUserData() // Remove invalid user item progress
await CacheManager.ensureCachePaths()
const ComicCacheManager = require('./managers/ComicCacheManager')
await ComicCacheManager.ensureCachePaths()
await ShareManager.init()
await this.backupManager.init()

View file

@ -1157,6 +1157,145 @@ class LibraryItemController {
res.sendStatus(200)
}
/**
* GET api/items/:id/comic-pages/:fileid?
* Get comic metadata (page list) without downloading the whole file
* fileid is optional - defaults to primary ebook
*
* @param {LibraryItemControllerRequest} req
* @param {Response} res
*/
async getComicPages(req, res) {
const ComicCacheManager = require('../managers/ComicCacheManager')
let ebookFile = null
if (req.params.fileid) {
ebookFile = req.libraryItem.getLibraryFileWithIno(req.params.fileid)
if (!ebookFile?.isEBookFile) {
Logger.error(`[LibraryItemController] Invalid ebook file id "${req.params.fileid}"`)
return res.status(400).send('Invalid ebook file id')
}
} else {
ebookFile = req.libraryItem.media.ebookFile
}
if (!ebookFile) {
Logger.error(`[LibraryItemController] No ebookFile for library item "${req.libraryItem.media.title}"`)
return res.sendStatus(404)
}
const ext = (ebookFile.metadata?.ext || '').toLowerCase()
if (ext !== '.cbz' && ext !== '.cbr') {
Logger.error(`[LibraryItemController] File is not a comic book: ${ext}`)
return res.status(400).send('File is not a comic book (cbz/cbr)')
}
try {
const comicPath = ebookFile.metadata.path
const fileIno = ebookFile.ino
const { pages, numPages } = await ComicCacheManager.getComicMetadata(
req.libraryItem.id,
fileIno,
comicPath
)
res.json({
libraryItemId: req.libraryItem.id,
fileIno,
numPages,
pages: pages.map((p, i) => ({
page: i + 1,
filename: p
}))
})
} catch (error) {
Logger.error(`[LibraryItemController] Failed to get comic pages: ${error.message}`)
res.status(500).send('Failed to read comic file')
}
}
/**
* GET api/items/:id/comic-page/:page/:fileid?
* Get a single comic page (extracted and cached on server)
* Public endpoint (no auth required, like covers)
*
* @param {Request} req
* @param {Response} res
*/
async getComicPage(req, res) {
const ComicCacheManager = require('../managers/ComicCacheManager')
const libraryItemId = req.params.id
if (!libraryItemId) {
return res.sendStatus(400)
}
const pageNum = parseInt(req.params.page, 10)
if (isNaN(pageNum) || pageNum < 1) {
return res.status(400).send('Invalid page number')
}
// Fetch library item directly (no auth middleware)
const libraryItem = await Database.libraryItemModel.getExpandedById(libraryItemId)
if (!libraryItem?.media) {
return res.sendStatus(404)
}
let ebookFile = null
if (req.params.fileid) {
ebookFile = libraryItem.getLibraryFileWithIno(req.params.fileid)
if (!ebookFile?.isEBookFile) {
Logger.error(`[LibraryItemController] Invalid ebook file id "${req.params.fileid}"`)
return res.status(400).send('Invalid ebook file id')
}
} else {
ebookFile = libraryItem.media.ebookFile
}
if (!ebookFile) {
Logger.error(`[LibraryItemController] No ebookFile for library item "${libraryItem.media.title}"`)
return res.sendStatus(404)
}
const ext = (ebookFile.metadata?.ext || '').toLowerCase()
if (ext !== '.cbz' && ext !== '.cbr') {
Logger.error(`[LibraryItemController] File is not a comic book: ${ext}`)
return res.status(400).send('File is not a comic book (cbz/cbr)')
}
try {
const comicPath = ebookFile.metadata.path
const fileIno = ebookFile.ino
const result = await ComicCacheManager.getPage(
libraryItemId,
fileIno,
comicPath,
pageNum
)
if (!result) {
return res.sendStatus(404)
}
// Set cache headers for browser caching
res.set({
'Content-Type': result.contentType,
'Cache-Control': 'private, max-age=86400' // Cache for 24 hours
})
if (global.XAccel) {
const encodedURI = encodeUriPath(global.XAccel + result.path)
Logger.debug(`Use X-Accel to serve comic page ${encodedURI}`)
return res.status(204).header({ 'X-Accel-Redirect': encodedURI }).send()
}
res.sendFile(result.path)
} catch (error) {
Logger.error(`[LibraryItemController] Failed to get comic page: ${error.message}`)
res.status(500).send('Failed to extract comic page')
}
}
/**
*
* @param {RequestWithUser} req

View file

@ -0,0 +1,301 @@
const Path = require('path')
const fs = require('../libs/fsExtra')
const Logger = require('../Logger')
const { createComicBookExtractor } = require('../utils/comicBookExtractors')
/**
* Manages caching of extracted comic book pages for performance.
* Pages are extracted on-demand and cached to disk.
*/
/**
 * Manages caching of extracted comic book pages for performance.
 * Page lists (metadata) are cached in memory keyed by item+inode and
 * validated against the archive's mtime; individual pages are extracted
 * on demand and cached to disk under MetadataPath/cache/comics.
 */
class ComicCacheManager {
  constructor() {
    // Root directory for on-disk page caches; set by ensureCachePaths()
    this.ComicCachePath = null
    // In-memory cache of comic metadata (page lists)
    // Key: `${libraryItemId}:${fileIno}`, Value: { pages: string[], mtime: number }
    this.metadataCache = new Map()
    // Open archive extractors kept alive briefly for reuse across page requests
    // Key: `${libraryItemId}:${fileIno}`, Value: { extractor, timeout }
    this.extractorCache = new Map()
    this.extractorTimeout = 30000 // Close extractors after 30s of inactivity
  }

  /**
   * Initialize the cache directory (call once at server startup)
   * @throws when the directory cannot be created
   */
  async ensureCachePaths() {
    this.ComicCachePath = Path.join(global.MetadataPath, 'cache', 'comics')
    try {
      await fs.ensureDir(this.ComicCachePath)
    } catch (error) {
      Logger.error(`[ComicCacheManager] Failed to create cache directory at "${this.ComicCachePath}": ${error.message}`)
      throw error
    }
  }

  /**
   * Get cache directory for a specific comic
   * @param {string} libraryItemId
   * @param {string} fileIno
   * @returns {string}
   */
  getComicCacheDir(libraryItemId, fileIno) {
    return Path.join(this.ComicCachePath, `${libraryItemId}_${fileIno}`)
  }

  /**
   * Get the on-disk path for a cached page image.
   * Page numbers are zero-padded so cached files sort naturally.
   * @param {string} libraryItemId
   * @param {string} fileIno
   * @param {number} pageNum - 1-indexed page number
   * @param {string} ext - image extension including the leading dot
   * @returns {string}
   */
  getCachedPagePath(libraryItemId, fileIno, pageNum, ext) {
    const cacheDir = this.getComicCacheDir(libraryItemId, fileIno)
    return Path.join(cacheDir, `page_${String(pageNum).padStart(5, '0')}${ext}`)
  }

  /**
   * Filter archive entries to supported images and sort into reading order.
   * Ordering key is the LAST run of digits in each basename; filenames with
   * no digits are appended after the numbered ones.
   * @param {string[]} filenames
   * @returns {string[]}
   */
  parseAndSortPages(filenames) {
    const acceptableImages = ['.jpeg', '.jpg', '.png', '.webp', '.gif']
    const imageFiles = filenames.filter((f) => {
      const ext = (Path.extname(f) || '').toLowerCase()
      return acceptableImages.includes(ext)
    })

    const parsed = imageFiles.map((filename) => {
      const basename = Path.basename(filename, Path.extname(filename))
      const numbers = basename.match(/\d+/g)
      return {
        filename,
        index: numbers?.length ? Number(numbers[numbers.length - 1]) : -1
      }
    })

    const withNum = parsed.filter((p) => p.index >= 0).sort((a, b) => a.index - b.index)
    const withoutNum = parsed.filter((p) => p.index < 0)
    return [...withNum, ...withoutNum].map((p) => p.filename)
  }

  /**
   * Get or create an extractor for a comic, with caching.
   * Each access resets the inactivity timer.
   * @param {string} comicPath
   * @param {string} cacheKey
   * @returns {Promise<object>}
   */
  async getExtractor(comicPath, cacheKey) {
    const cached = this.extractorCache.get(cacheKey)
    if (cached) {
      clearTimeout(cached.timeout)
      cached.timeout = setTimeout(() => this.closeExtractor(cacheKey), this.extractorTimeout)
      return cached.extractor
    }
    const extractor = createComicBookExtractor(comicPath)
    await extractor.open()
    const timeout = setTimeout(() => this.closeExtractor(cacheKey), this.extractorTimeout)
    this.extractorCache.set(cacheKey, { extractor, timeout })
    return extractor
  }

  /**
   * Close and remove a cached extractor. Safe to call for unknown keys.
   * @param {string} cacheKey
   */
  closeExtractor(cacheKey) {
    const cached = this.extractorCache.get(cacheKey)
    if (!cached) return
    clearTimeout(cached.timeout)
    try {
      cached.extractor.close()
    } catch (e) {
      // Best-effort close; a failed close should not break the request path
      Logger.debug(`[ComicCacheManager] Error closing extractor: ${e.message}`)
    }
    this.extractorCache.delete(cacheKey)
    Logger.debug(`[ComicCacheManager] Closed extractor for ${cacheKey}`)
  }

  /**
   * Get comic metadata (page list) with caching.
   * Cached entries are validated against the archive mtime; if the file
   * changed, the entire per-comic cache (memory, disk pages, extractor) is
   * purged before re-extracting, so stale page images are never served.
   * @param {string} libraryItemId
   * @param {string} fileIno
   * @param {string} comicPath
   * @returns {Promise<{pages: string[], numPages: number}>}
   */
  async getComicMetadata(libraryItemId, fileIno, comicPath) {
    const cacheKey = `${libraryItemId}:${fileIno}`

    const cached = this.metadataCache.get(cacheKey)
    if (cached) {
      try {
        const stat = await fs.stat(comicPath)
        if (stat.mtimeMs === cached.mtime) {
          return { pages: cached.pages, numPages: cached.pages.length }
        }
      } catch (e) {
        // File may have been removed; fall through and re-extract
      }
      // Archive changed on disk: drop stale disk pages and the open extractor
      // too, otherwise getPage() would keep serving pages from the old archive
      await this.purgeComicCache(libraryItemId, fileIno)
    }

    // Extract the page list from the archive
    const extractor = await this.getExtractor(comicPath, cacheKey)
    const allFiles = await extractor.getFilePaths()
    const pages = this.parseAndSortPages(allFiles)

    // Record the mtime for later cache validation
    const stat = await fs.stat(comicPath)
    this.metadataCache.set(cacheKey, {
      pages,
      mtime: stat.mtimeMs
    })
    Logger.debug(`[ComicCacheManager] Cached metadata for ${cacheKey}: ${pages.length} pages`)
    return { pages, numPages: pages.length }
  }

  /**
   * Get a specific page, extracting and caching to disk if necessary
   * @param {string} libraryItemId
   * @param {string} fileIno
   * @param {string} comicPath
   * @param {number} pageNum - 1-indexed page number
   * @returns {Promise<{path: string, contentType: string} | null>} null on invalid page or extraction failure
   */
  async getPage(libraryItemId, fileIno, comicPath, pageNum) {
    const cacheKey = `${libraryItemId}:${fileIno}`

    // Resolve the page list (also validates/refreshes the cache)
    const { pages } = await this.getComicMetadata(libraryItemId, fileIno, comicPath)
    if (pageNum < 1 || pageNum > pages.length) {
      Logger.error(`[ComicCacheManager] Invalid page number ${pageNum} for comic with ${pages.length} pages`)
      return null
    }

    const pageFilename = pages[pageNum - 1]
    const ext = Path.extname(pageFilename).toLowerCase()
    const cachedPath = this.getCachedPagePath(libraryItemId, fileIno, pageNum, ext)

    // Serve from disk cache when the page was already extracted
    if (await fs.pathExists(cachedPath)) {
      Logger.debug(`[ComicCacheManager] Serving cached page ${pageNum} from ${cachedPath}`)
      return {
        path: cachedPath,
        contentType: this.getContentType(ext)
      }
    }

    // Extract the page from the archive and cache it
    const cacheDir = this.getComicCacheDir(libraryItemId, fileIno)
    await fs.ensureDir(cacheDir)
    const extractor = await this.getExtractor(comicPath, cacheKey)
    const success = await extractor.extractToFile(pageFilename, cachedPath)
    if (!success) {
      Logger.error(`[ComicCacheManager] Failed to extract page ${pageNum} (${pageFilename})`)
      return null
    }
    Logger.debug(`[ComicCacheManager] Extracted and cached page ${pageNum} to ${cachedPath}`)
    return {
      path: cachedPath,
      contentType: this.getContentType(ext)
    }
  }

  /**
   * Get HTTP content type for an image extension
   * @param {string} ext - lowercase extension including the leading dot
   * @returns {string} image mime type, or application/octet-stream for unknown extensions
   */
  getContentType(ext) {
    const types = {
      '.jpg': 'image/jpeg',
      '.jpeg': 'image/jpeg',
      '.png': 'image/png',
      '.webp': 'image/webp',
      '.gif': 'image/gif'
    }
    return types[ext] || 'application/octet-stream'
  }

  /**
   * Purge all cached state (memory, disk, extractor) for a specific comic
   * @param {string} libraryItemId
   * @param {string} fileIno
   */
  async purgeComicCache(libraryItemId, fileIno) {
    const cacheKey = `${libraryItemId}:${fileIno}`
    const cacheDir = this.getComicCacheDir(libraryItemId, fileIno)

    // Close any open extractor
    this.closeExtractor(cacheKey)
    // Remove metadata cache
    this.metadataCache.delete(cacheKey)
    // Remove disk cache
    if (await fs.pathExists(cacheDir)) {
      await fs.remove(cacheDir)
      Logger.info(`[ComicCacheManager] Purged cache for ${cacheKey}`)
    }
  }

  /**
   * Purge all cached pages for a library item (every ebook file it has)
   * @param {string} libraryItemId
   */
  async purgeLibraryItemCache(libraryItemId) {
    // Close any open extractors for this item
    for (const key of [...this.extractorCache.keys()]) {
      if (key.startsWith(`${libraryItemId}:`)) {
        this.closeExtractor(key)
      }
    }
    // Remove metadata cache entries
    for (const key of [...this.metadataCache.keys()]) {
      if (key.startsWith(`${libraryItemId}:`)) {
        this.metadataCache.delete(key)
      }
    }
    // Remove disk cache dirs (best-effort; ignore missing cache root)
    const files = await fs.readdir(this.ComicCachePath).catch(() => [])
    for (const file of files) {
      if (file.startsWith(`${libraryItemId}_`)) {
        await fs.remove(Path.join(this.ComicCachePath, file)).catch(() => {})
      }
    }
    Logger.info(`[ComicCacheManager] Purged all cache for library item ${libraryItemId}`)
  }

  /**
   * Close all open extractors (for server shutdown)
   */
  closeAllExtractors() {
    for (const key of [...this.extractorCache.keys()]) {
      this.closeExtractor(key)
    }
  }
}
// Export a shared singleton so all routes/managers reuse the same caches
module.exports = new ComicCacheManager()

View file

@ -126,6 +126,9 @@ class ApiRouter {
this.router.get('/items/:id/file/:fileid/download', LibraryItemController.middleware.bind(this), LibraryItemController.downloadLibraryFile.bind(this))
this.router.get('/items/:id/ebook/:fileid?', LibraryItemController.middleware.bind(this), LibraryItemController.getEBookFile.bind(this))
this.router.patch('/items/:id/ebook/:fileid/status', LibraryItemController.middleware.bind(this), LibraryItemController.updateEbookFileStatus.bind(this))
// Comic page routes - server-side extraction with caching for performance
this.router.get('/items/:id/comic-pages/:fileid?', LibraryItemController.middleware.bind(this), LibraryItemController.getComicPages.bind(this))
this.router.get('/items/:id/comic-page/:page/:fileid?', LibraryItemController.getComicPage.bind(this))
//
// User Routes