Mirror of https://github.com/advplyr/audiobookshelf.git (synced 2025-12-25 13:19:38 +00:00)

Commit a5dacd7821: Merge master
47 changed files with 3086 additions and 598 deletions
@@ -162,13 +162,6 @@ class FolderWatcher extends EventEmitter {
    }
    var folderFullPath = folder.fullPath.replace(/\\/g, '/')

    // Check if file was added to root directory
    var dir = Path.dirname(path)
    if (dir === folderFullPath) {
      Logger.warn(`[Watcher] New file "${Path.basename(path)}" added to folder root - ignoring it`)
      return
    }

    var relPath = path.replace(folderFullPath, '')

    var hasDotPath = relPath.split('/').find(p => p.startsWith('.'))
@@ -189,8 +189,8 @@ class LibraryItemController {
      Logger.error(`[LibraryItemController] startPlaybackSession cannot playback ${req.libraryItem.id}`)
      return res.sendStatus(404)
    }
    const options = req.body || {}
    this.playbackSessionManager.startSessionRequest(req.user, req.libraryItem, null, options, res)
    this.playbackSessionManager.startSessionRequest(req, res, null)
  }

  // POST: api/items/:id/play/:episodeId

@@ -206,8 +206,7 @@ class LibraryItemController {
      return res.sendStatus(404)
    }

    const options = req.body || {}
    this.playbackSessionManager.startSessionRequest(req.user, libraryItem, episodeId, options, res)
    this.playbackSessionManager.startSessionRequest(req, res, episodeId)
  }

  // PATCH: api/items/:id/tracks

@@ -224,38 +223,6 @@ class LibraryItemController {
    res.json(libraryItem.toJSON())
  }

  // PATCH: api/items/:id/episodes
  async updateEpisodes(req, res) { // For updating podcast episode order
    var libraryItem = req.libraryItem
    var orderedFileData = req.body.episodes
    if (!libraryItem.media.setEpisodeOrder) {
      Logger.error(`[LibraryItemController] updateEpisodes invalid media type ${libraryItem.id}`)
      return res.sendStatus(500)
    }
    libraryItem.media.setEpisodeOrder(orderedFileData)
    await this.db.updateLibraryItem(libraryItem)
    this.emitter('item_updated', libraryItem.toJSONExpanded())
    res.json(libraryItem.toJSON())
  }

  // DELETE: api/items/:id/episode/:episodeId
  async removeEpisode(req, res) {
    var episodeId = req.params.episodeId
    var libraryItem = req.libraryItem
    if (libraryItem.mediaType !== 'podcast') {
      Logger.error(`[LibraryItemController] removeEpisode invalid media type ${libraryItem.id}`)
      return res.sendStatus(500)
    }
    if (!libraryItem.media.episodes.find(ep => ep.id === episodeId)) {
      Logger.error(`[LibraryItemController] removeEpisode episode ${episodeId} not found for item ${libraryItem.id}`)
      return res.sendStatus(404)
    }
    libraryItem.media.removeEpisode(episodeId)
    await this.db.updateLibraryItem(libraryItem)
    this.emitter('item_updated', libraryItem.toJSONExpanded())
    res.json(libraryItem.toJSON())
  }

  // POST api/items/:id/match
  async match(req, res) {
    var libraryItem = req.libraryItem
@@ -109,10 +109,8 @@ class PodcastController {
      return res.status(500).send('Invalid podcast RSS feed')
    }

    if (!payload.podcast.metadata.feedUrl) {
      // Not every RSS feed will put the feed url in their metadata
      payload.podcast.metadata.feedUrl = url
    }
    // RSS feed may be a private RSS feed
    payload.podcast.metadata.feedUrl = url

    res.json(payload)
  }).catch((error) => {

@@ -190,6 +188,35 @@ class PodcastController {
    res.json(libraryItem.toJSONExpanded())
  }

  // DELETE: api/podcasts/:id/episode/:episodeId
  async removeEpisode(req, res) {
    var episodeId = req.params.episodeId
    var libraryItem = req.libraryItem
    var hardDelete = req.query.hard === '1'

    var episode = libraryItem.media.episodes.find(ep => ep.id === episodeId)
    if (!episode) {
      Logger.error(`[PodcastController] removeEpisode episode ${episodeId} not found for item ${libraryItem.id}`)
      return res.sendStatus(404)
    }

    if (hardDelete) {
      var audioFile = episode.audioFile
      // TODO: this will trigger the watcher. should maybe handle this gracefully
      await fs.remove(audioFile.metadata.path).then(() => {
        Logger.info(`[PodcastController] Hard deleted episode file at "${audioFile.metadata.path}"`)
      }).catch((error) => {
        Logger.error(`[PodcastController] Failed to hard delete episode file at "${audioFile.metadata.path}"`, error)
      })
    }

    libraryItem.media.removeEpisode(episodeId)

    await this.db.updateLibraryItem(libraryItem)
    this.emitter('item_updated', libraryItem.toJSONExpanded())
    res.json(libraryItem.toJSON())
  }

  middleware(req, res, next) {
    var item = this.db.libraryItems.find(li => li.id === req.params.id)
if (!item || !item.media) return res.sendStatus(404)
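
A minimal sketch of exercising the new hard-delete option from a client, assuming an authenticated axios instance and the /api mount implied by the route comment above; the identifiers are illustrative:

  // Remove only the episode record from the library item
  await axios.delete(`/api/podcasts/${libraryItemId}/episode/${episodeId}`)
  // Remove the episode and also delete its audio file from disk
  await axios.delete(`/api/podcasts/${libraryItemId}/episode/${episodeId}?hard=1`)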

server/libs/isJs.js (new file, 5 lines)
File diff suppressed because one or more lines are too long

server/libs/requestIp.js (new file, 174 lines)

@@ -0,0 +1,174 @@
// SOURCE: https://github.com/pbojinov/request-ip

"use strict";

function _typeof(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }

var is = require('./isJs');
/**
 * Parse x-forwarded-for headers.
 *
 * @param {string} value - The value to be parsed.
 * @return {string|null} First known IP address, if any.
 */


function getClientIpFromXForwardedFor(value) {
  if (!is.existy(value)) {
    return null;
  }

  if (is.not.string(value)) {
    throw new TypeError("Expected a string, got \"".concat(_typeof(value), "\""));
  } // x-forwarded-for may return multiple IP addresses in the format:
  // "client IP, proxy 1 IP, proxy 2 IP"
  // Therefore, the right-most IP address is the IP address of the most recent proxy
  // and the left-most IP address is the IP address of the originating client.
  // source: http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/x-forwarded-headers.html
  // Azure Web App's also adds a port for some reason, so we'll only use the first part (the IP)


  var forwardedIps = value.split(',').map(function (e) {
    var ip = e.trim();

    if (ip.includes(':')) {
      var splitted = ip.split(':'); // make sure we only use this if it's ipv4 (ip:port)

      if (splitted.length === 2) {
        return splitted[0];
      }
    }

    return ip;
  }); // Sometimes IP addresses in this header can be 'unknown' (http://stackoverflow.com/a/11285650).
  // Therefore taking the left-most IP address that is not unknown
  // A Squid configuration directive can also set the value to "unknown" (http://www.squid-cache.org/Doc/config/forwarded_for/)

  return forwardedIps.find(is.ip);
}
/**
 * Determine client IP address.
 *
 * @param req
 * @returns {string} ip - The IP address if known, defaulting to empty string if unknown.
 */


function getClientIp(req) {
  // Server is probably behind a proxy.
  if (req.headers) {
    // Standard headers used by Amazon EC2, Heroku, and others.
    if (is.ip(req.headers['x-client-ip'])) {
      return req.headers['x-client-ip'];
    } // Load-balancers (AWS ELB) or proxies.


    var xForwardedFor = getClientIpFromXForwardedFor(req.headers['x-forwarded-for']);

    if (is.ip(xForwardedFor)) {
      return xForwardedFor;
    } // Cloudflare.
    // @see https://support.cloudflare.com/hc/en-us/articles/200170986-How-does-Cloudflare-handle-HTTP-Request-headers-
    // CF-Connecting-IP - applied to every request to the origin.


    if (is.ip(req.headers['cf-connecting-ip'])) {
      return req.headers['cf-connecting-ip'];
    } // Fastly and Firebase hosting header (When forwared to cloud function)


    if (is.ip(req.headers['fastly-client-ip'])) {
      return req.headers['fastly-client-ip'];
    } // Akamai and Cloudflare: True-Client-IP.


    if (is.ip(req.headers['true-client-ip'])) {
      return req.headers['true-client-ip'];
    } // Default nginx proxy/fcgi; alternative to x-forwarded-for, used by some proxies.


    if (is.ip(req.headers['x-real-ip'])) {
      return req.headers['x-real-ip'];
    } // (Rackspace LB and Riverbed's Stingray)
    // http://www.rackspace.com/knowledge_center/article/controlling-access-to-linux-cloud-sites-based-on-the-client-ip-address
    // https://splash.riverbed.com/docs/DOC-1926


    if (is.ip(req.headers['x-cluster-client-ip'])) {
      return req.headers['x-cluster-client-ip'];
    }

    if (is.ip(req.headers['x-forwarded'])) {
      return req.headers['x-forwarded'];
    }

    if (is.ip(req.headers['forwarded-for'])) {
      return req.headers['forwarded-for'];
    }

    if (is.ip(req.headers.forwarded)) {
      return req.headers.forwarded;
    }
  } // Remote address checks.


  if (is.existy(req.connection)) {
    if (is.ip(req.connection.remoteAddress)) {
      return req.connection.remoteAddress;
    }

    if (is.existy(req.connection.socket) && is.ip(req.connection.socket.remoteAddress)) {
      return req.connection.socket.remoteAddress;
    }
  }

  if (is.existy(req.socket) && is.ip(req.socket.remoteAddress)) {
    return req.socket.remoteAddress;
  }

  if (is.existy(req.info) && is.ip(req.info.remoteAddress)) {
    return req.info.remoteAddress;
  } // AWS Api Gateway + Lambda


  if (is.existy(req.requestContext) && is.existy(req.requestContext.identity) && is.ip(req.requestContext.identity.sourceIp)) {
    return req.requestContext.identity.sourceIp;
  }

  return null;
}
/**
 * Expose request IP as a middleware.
 *
 * @param {object} [options] - Configuration.
 * @param {string} [options.attributeName] - Name of attribute to augment request object with.
 * @return {*}
 */


function mw(options) {
  // Defaults.
  var configuration = is.not.existy(options) ? {} : options; // Validation.

  if (is.not.object(configuration)) {
    throw new TypeError('Options must be an object!');
  }

  var attributeName = configuration.attributeName || 'clientIp';
  return function (req, res, next) {
    var ip = getClientIp(req);
    Object.defineProperty(req, attributeName, {
      get: function get() {
        return ip;
      },
      configurable: true
    });
    next();
  };
}

module.exports = {
  getClientIpFromXForwardedFor: getClientIpFromXForwardedFor,
  getClientIp: getClientIp,
  mw: mw
};
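
A short usage sketch for this vendored helper inside an Express-style handler (the app wiring shown is an illustrative assumption):

  const requestIp = require('./server/libs/requestIp')

  // Resolve the client IP for a single request: proxy headers first
  // (x-client-ip, x-forwarded-for, cf-connecting-ip, ...), then the socket address
  app.get('/whoami', (req, res) => {
    res.json({ ip: requestIp.getClientIp(req) })
  })

  // Or expose it on every request as req.clientIp via the bundled middleware
  app.use(requestIp.mw())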

server/libs/sanitizeHtml.js (new file, 874 lines)

@@ -0,0 +1,874 @@
/*
  sanitize-html (Apostrophe Technologies)
  SOURCE: https://github.com/apostrophecms/sanitize-html
  LICENSE: https://github.com/apostrophecms/sanitize-html/blob/main/LICENSE

  Modified for audiobookshelf
*/

const htmlparser = require('htmlparser2');
// const escapeStringRegexp = require('escape-string-regexp');
// const { isPlainObject } = require('is-plain-object');
// const deepmerge = require('deepmerge');
// const parseSrcset = require('parse-srcset');
// const { parse: postcssParse } = require('postcss');
// Tags that can conceivably represent stand-alone media.

// ABS UPDATE: Packages not necessary
// SOURCE: https://github.com/sindresorhus/escape-string-regexp/blob/main/index.js
function escapeStringRegexp(string) {
  if (typeof string !== 'string') {
    throw new TypeError('Expected a string');
  }

  // Escape characters with special meaning either inside or outside character sets.
  // Use a simple backslash escape when it’s always valid, and a `\xnn` escape when the simpler form would be disallowed by Unicode patterns’ stricter grammar.
  return string
    .replace(/[|\\{}()[\]^$+*?.]/g, '\\$&')
    .replace(/-/g, '\\x2d');
}

// SOURCE: https://github.com/jonschlinkert/is-plain-object/blob/master/is-plain-object.js
|
||||
function isObject(o) {
|
||||
return Object.prototype.toString.call(o) === '[object Object]';
|
||||
}
|
||||
|
||||
function isPlainObject(o) {
|
||||
var ctor, prot;
|
||||
|
||||
if (isObject(o) === false) return false;
|
||||
|
||||
// If has modified constructor
|
||||
ctor = o.constructor;
|
||||
if (ctor === undefined) return true;
|
||||
|
||||
// If has modified prototype
|
||||
prot = ctor.prototype;
|
||||
if (isObject(prot) === false) return false;
|
||||
|
||||
// If constructor does not have an Object-specific method
|
||||
if (prot.hasOwnProperty('isPrototypeOf') === false) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Most likely a plain Object
|
||||
return true;
|
||||
};
|
||||
|
||||
|
||||
const mediaTags = [
|
||||
'img', 'audio', 'video', 'picture', 'svg',
|
||||
'object', 'map', 'iframe', 'embed'
|
||||
];
|
||||
// Tags that are inherently vulnerable to being used in XSS attacks.
|
||||
const vulnerableTags = ['script', 'style'];
|
||||
|
||||
function each(obj, cb) {
|
||||
if (obj) {
|
||||
Object.keys(obj).forEach(function (key) {
|
||||
cb(obj[key], key);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Avoid false positives with .__proto__, .hasOwnProperty, etc.
|
||||
function has(obj, key) {
|
||||
return ({}).hasOwnProperty.call(obj, key);
|
||||
}
|
||||
|
||||
// Returns those elements of `a` for which `cb(a)` returns truthy
|
||||
function filter(a, cb) {
|
||||
const n = [];
|
||||
each(a, function (v) {
|
||||
if (cb(v)) {
|
||||
n.push(v);
|
||||
}
|
||||
});
|
||||
return n;
|
||||
}
|
||||
|
||||
function isEmptyObject(obj) {
|
||||
for (const key in obj) {
|
||||
if (has(obj, key)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function stringifySrcset(parsedSrcset) {
|
||||
return parsedSrcset.map(function (part) {
|
||||
if (!part.url) {
|
||||
throw new Error('URL missing');
|
||||
}
|
||||
|
||||
return (
|
||||
part.url +
|
||||
(part.w ? ` ${part.w}w` : '') +
|
||||
(part.h ? ` ${part.h}h` : '') +
|
||||
(part.d ? ` ${part.d}x` : '')
|
||||
);
|
||||
}).join(', ');
|
||||
}
|
||||
|
||||
module.exports = sanitizeHtml;
|
||||
|
||||
// A valid attribute name.
|
||||
// We use a tolerant definition based on the set of strings defined by
|
||||
// html.spec.whatwg.org/multipage/parsing.html#before-attribute-name-state
|
||||
// and html.spec.whatwg.org/multipage/parsing.html#attribute-name-state .
|
||||
// The characters accepted are ones which can be appended to the attribute
|
||||
// name buffer without triggering a parse error:
|
||||
// * unexpected-equals-sign-before-attribute-name
|
||||
// * unexpected-null-character
|
||||
// * unexpected-character-in-attribute-name
|
||||
// We exclude the empty string because it's impossible to get to the after
|
||||
// attribute name state with an empty attribute name buffer.
|
||||
const VALID_HTML_ATTRIBUTE_NAME = /^[^\0\t\n\f\r /<=>]+$/;
|
||||
|
||||
// Ignore the _recursing flag; it's there for recursive
|
||||
// invocation as a guard against this exploit:
|
||||
// https://github.com/fb55/htmlparser2/issues/105
|
||||
|
||||
function sanitizeHtml(html, options, _recursing) {
|
||||
if (html == null) {
|
||||
return '';
|
||||
}
|
||||
|
||||
let result = '';
|
||||
// Used for hot swapping the result variable with an empty string in order to "capture" the text written to it.
|
||||
let tempResult = '';
|
||||
|
||||
function Frame(tag, attribs) {
|
||||
const that = this;
|
||||
this.tag = tag;
|
||||
this.attribs = attribs || {};
|
||||
this.tagPosition = result.length;
|
||||
this.text = ''; // Node inner text
|
||||
this.mediaChildren = [];
|
||||
|
||||
this.updateParentNodeText = function () {
|
||||
if (stack.length) {
|
||||
const parentFrame = stack[stack.length - 1];
|
||||
parentFrame.text += that.text;
|
||||
}
|
||||
};
|
||||
|
||||
this.updateParentNodeMediaChildren = function () {
|
||||
if (stack.length && mediaTags.includes(this.tag)) {
|
||||
const parentFrame = stack[stack.length - 1];
|
||||
parentFrame.mediaChildren.push(this.tag);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
options = Object.assign({}, sanitizeHtml.defaults, options);
|
||||
options.parser = Object.assign({}, htmlParserDefaults, options.parser);
|
||||
|
||||
// vulnerableTags
|
||||
vulnerableTags.forEach(function (tag) {
|
||||
if (
|
||||
options.allowedTags && options.allowedTags.indexOf(tag) > -1 &&
|
||||
!options.allowVulnerableTags
|
||||
) {
|
||||
console.warn(`\n\n⚠️ Your \`allowedTags\` option includes, \`${tag}\`, which is inherently\nvulnerable to XSS attacks. Please remove it from \`allowedTags\`.\nOr, to disable this warning, add the \`allowVulnerableTags\` option\nand ensure you are accounting for this risk.\n\n`);
|
||||
}
|
||||
});
|
||||
|
||||
// Tags that contain something other than HTML, or where discarding
|
||||
// the text when the tag is disallowed makes sense for other reasons.
|
||||
// If we are not allowing these tags, we should drop their content too.
|
||||
// For other tags you would drop the tag but keep its content.
|
||||
const nonTextTagsArray = options.nonTextTags || [
|
||||
'script',
|
||||
'style',
|
||||
'textarea',
|
||||
'option'
|
||||
];
|
||||
let allowedAttributesMap;
|
||||
let allowedAttributesGlobMap;
|
||||
if (options.allowedAttributes) {
|
||||
allowedAttributesMap = {};
|
||||
allowedAttributesGlobMap = {};
|
||||
each(options.allowedAttributes, function (attributes, tag) {
|
||||
allowedAttributesMap[tag] = [];
|
||||
const globRegex = [];
|
||||
attributes.forEach(function (obj) {
|
||||
if (typeof obj === 'string' && obj.indexOf('*') >= 0) {
|
||||
globRegex.push(escapeStringRegexp(obj).replace(/\\\*/g, '.*'));
|
||||
} else {
|
||||
allowedAttributesMap[tag].push(obj);
|
||||
}
|
||||
});
|
||||
if (globRegex.length) {
|
||||
allowedAttributesGlobMap[tag] = new RegExp('^(' + globRegex.join('|') + ')$');
|
||||
}
|
||||
});
|
||||
}
|
||||
const allowedClassesMap = {};
|
||||
const allowedClassesGlobMap = {};
|
||||
const allowedClassesRegexMap = {};
|
||||
each(options.allowedClasses, function (classes, tag) {
|
||||
// Implicitly allows the class attribute
|
||||
if (allowedAttributesMap) {
|
||||
if (!has(allowedAttributesMap, tag)) {
|
||||
allowedAttributesMap[tag] = [];
|
||||
}
|
||||
allowedAttributesMap[tag].push('class');
|
||||
}
|
||||
|
||||
allowedClassesMap[tag] = [];
|
||||
allowedClassesRegexMap[tag] = [];
|
||||
const globRegex = [];
|
||||
classes.forEach(function (obj) {
|
||||
if (typeof obj === 'string' && obj.indexOf('*') >= 0) {
|
||||
globRegex.push(escapeStringRegexp(obj).replace(/\\\*/g, '.*'));
|
||||
} else if (obj instanceof RegExp) {
|
||||
allowedClassesRegexMap[tag].push(obj);
|
||||
} else {
|
||||
allowedClassesMap[tag].push(obj);
|
||||
}
|
||||
});
|
||||
if (globRegex.length) {
|
||||
allowedClassesGlobMap[tag] = new RegExp('^(' + globRegex.join('|') + ')$');
|
||||
}
|
||||
});
|
||||
|
||||
const transformTagsMap = {};
|
||||
let transformTagsAll;
|
||||
each(options.transformTags, function (transform, tag) {
|
||||
let transFun;
|
||||
if (typeof transform === 'function') {
|
||||
transFun = transform;
|
||||
} else if (typeof transform === 'string') {
|
||||
transFun = sanitizeHtml.simpleTransform(transform);
|
||||
}
|
||||
if (tag === '*') {
|
||||
transformTagsAll = transFun;
|
||||
} else {
|
||||
transformTagsMap[tag] = transFun;
|
||||
}
|
||||
});
|
||||
|
||||
let depth;
|
||||
let stack;
|
||||
let skipMap;
|
||||
let transformMap;
|
||||
let skipText;
|
||||
let skipTextDepth;
|
||||
let addedText = false;
|
||||
|
||||
initializeState();
|
||||
|
||||
const parser = new htmlparser.Parser({
|
||||
onopentag: function (name, attribs) {
|
||||
// If `enforceHtmlBoundary` is `true` and this has found the opening
|
||||
// `html` tag, reset the state.
|
||||
if (options.enforceHtmlBoundary && name === 'html') {
|
||||
initializeState();
|
||||
}
|
||||
|
||||
if (skipText) {
|
||||
skipTextDepth++;
|
||||
return;
|
||||
}
|
||||
const frame = new Frame(name, attribs);
|
||||
stack.push(frame);
|
||||
|
||||
let skip = false;
|
||||
const hasText = !!frame.text;
|
||||
let transformedTag;
|
||||
if (has(transformTagsMap, name)) {
|
||||
transformedTag = transformTagsMap[name](name, attribs);
|
||||
|
||||
frame.attribs = attribs = transformedTag.attribs;
|
||||
|
||||
if (transformedTag.text !== undefined) {
|
||||
frame.innerText = transformedTag.text;
|
||||
}
|
||||
|
||||
if (name !== transformedTag.tagName) {
|
||||
frame.name = name = transformedTag.tagName;
|
||||
transformMap[depth] = transformedTag.tagName;
|
||||
}
|
||||
}
|
||||
if (transformTagsAll) {
|
||||
transformedTag = transformTagsAll(name, attribs);
|
||||
|
||||
frame.attribs = attribs = transformedTag.attribs;
|
||||
if (name !== transformedTag.tagName) {
|
||||
frame.name = name = transformedTag.tagName;
|
||||
transformMap[depth] = transformedTag.tagName;
|
||||
}
|
||||
}
|
||||
|
||||
if ((options.allowedTags && options.allowedTags.indexOf(name) === -1) || (options.disallowedTagsMode === 'recursiveEscape' && !isEmptyObject(skipMap)) || (options.nestingLimit != null && depth >= options.nestingLimit)) {
|
||||
skip = true;
|
||||
skipMap[depth] = true;
|
||||
if (options.disallowedTagsMode === 'discard') {
|
||||
if (nonTextTagsArray.indexOf(name) !== -1) {
|
||||
skipText = true;
|
||||
skipTextDepth = 1;
|
||||
}
|
||||
}
|
||||
skipMap[depth] = true;
|
||||
}
|
||||
depth++;
|
||||
if (skip) {
|
||||
if (options.disallowedTagsMode === 'discard') {
|
||||
// We want the contents but not this tag
|
||||
return;
|
||||
}
|
||||
tempResult = result;
|
||||
result = '';
|
||||
}
|
||||
result += '<' + name;
|
||||
|
||||
if (name === 'script') {
|
||||
if (options.allowedScriptHostnames || options.allowedScriptDomains) {
|
||||
frame.innerText = '';
|
||||
}
|
||||
}
|
||||
|
||||
if (!allowedAttributesMap || has(allowedAttributesMap, name) || allowedAttributesMap['*']) {
|
||||
each(attribs, function (value, a) {
|
||||
if (!VALID_HTML_ATTRIBUTE_NAME.test(a)) {
|
||||
// This prevents part of an attribute name in the output from being
|
||||
// interpreted as the end of an attribute, or end of a tag.
|
||||
delete frame.attribs[a];
|
||||
return;
|
||||
}
|
||||
let parsed;
|
||||
// check allowedAttributesMap for the element and attribute and modify the value
|
||||
// as necessary if there are specific values defined.
|
||||
let passedAllowedAttributesMapCheck = false;
|
||||
if (!allowedAttributesMap ||
|
||||
(has(allowedAttributesMap, name) && allowedAttributesMap[name].indexOf(a) !== -1) ||
|
||||
(allowedAttributesMap['*'] && allowedAttributesMap['*'].indexOf(a) !== -1) ||
|
||||
(has(allowedAttributesGlobMap, name) && allowedAttributesGlobMap[name].test(a)) ||
|
||||
(allowedAttributesGlobMap['*'] && allowedAttributesGlobMap['*'].test(a))) {
|
||||
passedAllowedAttributesMapCheck = true;
|
||||
} else if (allowedAttributesMap && allowedAttributesMap[name]) {
|
||||
for (const o of allowedAttributesMap[name]) {
|
||||
if (isPlainObject(o) && o.name && (o.name === a)) {
|
||||
passedAllowedAttributesMapCheck = true;
|
||||
let newValue = '';
|
||||
if (o.multiple === true) {
|
||||
// verify the values that are allowed
|
||||
const splitStrArray = value.split(' ');
|
||||
for (const s of splitStrArray) {
|
||||
if (o.values.indexOf(s) !== -1) {
|
||||
if (newValue === '') {
|
||||
newValue = s;
|
||||
} else {
|
||||
newValue += ' ' + s;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (o.values.indexOf(value) >= 0) {
|
||||
// verified an allowed value matches the entire attribute value
|
||||
newValue = value;
|
||||
}
|
||||
value = newValue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (passedAllowedAttributesMapCheck) {
|
||||
if (options.allowedSchemesAppliedToAttributes.indexOf(a) !== -1) {
|
||||
if (naughtyHref(name, value)) {
|
||||
delete frame.attribs[a];
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (name === 'script' && a === 'src') {
|
||||
|
||||
let allowed = true;
|
||||
|
||||
try {
|
||||
const parsed = new URL(value);
|
||||
|
||||
if (options.allowedScriptHostnames || options.allowedScriptDomains) {
|
||||
const allowedHostname = (options.allowedScriptHostnames || []).find(function (hostname) {
|
||||
return hostname === parsed.hostname;
|
||||
});
|
||||
const allowedDomain = (options.allowedScriptDomains || []).find(function (domain) {
|
||||
return parsed.hostname === domain || parsed.hostname.endsWith(`.${domain}`);
|
||||
});
|
||||
allowed = allowedHostname || allowedDomain;
|
||||
}
|
||||
} catch (e) {
|
||||
allowed = false;
|
||||
}
|
||||
|
||||
if (!allowed) {
|
||||
delete frame.attribs[a];
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (name === 'iframe' && a === 'src') {
|
||||
let allowed = true;
|
||||
try {
|
||||
// Chrome accepts \ as a substitute for / in the // at the
|
||||
// start of a URL, so rewrite accordingly to prevent exploit.
|
||||
// Also drop any whitespace at that point in the URL
|
||||
value = value.replace(/^(\w+:)?\s*[\\/]\s*[\\/]/, '$1//');
|
||||
if (value.startsWith('relative:')) {
|
||||
// An attempt to exploit our workaround for base URLs being
|
||||
// mandatory for relative URL validation in the WHATWG
|
||||
// URL parser, reject it
|
||||
throw new Error('relative: exploit attempt');
|
||||
}
|
||||
// naughtyHref is in charge of whether protocol relative URLs
|
||||
// are cool. Here we are concerned just with allowed hostnames and
|
||||
// whether to allow relative URLs.
|
||||
//
|
||||
// Build a placeholder "base URL" against which any reasonable
|
||||
// relative URL may be parsed successfully
|
||||
let base = 'relative://relative-site';
|
||||
for (let i = 0; (i < 100); i++) {
|
||||
base += `/${i}`;
|
||||
}
|
||||
const parsed = new URL(value, base);
|
||||
const isRelativeUrl = parsed && parsed.hostname === 'relative-site' && parsed.protocol === 'relative:';
|
||||
if (isRelativeUrl) {
|
||||
// default value of allowIframeRelativeUrls is true
|
||||
// unless allowedIframeHostnames or allowedIframeDomains specified
|
||||
allowed = has(options, 'allowIframeRelativeUrls')
|
||||
? options.allowIframeRelativeUrls
|
||||
: (!options.allowedIframeHostnames && !options.allowedIframeDomains);
|
||||
} else if (options.allowedIframeHostnames || options.allowedIframeDomains) {
|
||||
const allowedHostname = (options.allowedIframeHostnames || []).find(function (hostname) {
|
||||
return hostname === parsed.hostname;
|
||||
});
|
||||
const allowedDomain = (options.allowedIframeDomains || []).find(function (domain) {
|
||||
return parsed.hostname === domain || parsed.hostname.endsWith(`.${domain}`);
|
||||
});
|
||||
allowed = allowedHostname || allowedDomain;
|
||||
}
|
||||
} catch (e) {
|
||||
// Unparseable iframe src
|
||||
allowed = false;
|
||||
}
|
||||
if (!allowed) {
|
||||
delete frame.attribs[a];
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (a === 'srcset') {
|
||||
delete frame.attribs[a];
|
||||
|
||||
// ABS UPDATE: srcset not necessary
|
||||
// try {
|
||||
// parsed = parseSrcset(value);
|
||||
// parsed.forEach(function (value) {
|
||||
// if (naughtyHref('srcset', value.url)) {
|
||||
// value.evil = true;
|
||||
// }
|
||||
// });
|
||||
// parsed = filter(parsed, function (v) {
|
||||
// return !v.evil;
|
||||
// });
|
||||
// if (!parsed.length) {
|
||||
// delete frame.attribs[a];
|
||||
// return;
|
||||
// } else {
|
||||
// value = stringifySrcset(filter(parsed, function (v) {
|
||||
// return !v.evil;
|
||||
// }));
|
||||
// frame.attribs[a] = value;
|
||||
// }
|
||||
// } catch (e) {
|
||||
// // Unparseable srcset
|
||||
// delete frame.attribs[a];
|
||||
// return;
|
||||
// }
|
||||
}
|
||||
if (a === 'class') {
|
||||
const allowedSpecificClasses = allowedClassesMap[name];
|
||||
const allowedWildcardClasses = allowedClassesMap['*'];
|
||||
const allowedSpecificClassesGlob = allowedClassesGlobMap[name];
|
||||
const allowedSpecificClassesRegex = allowedClassesRegexMap[name];
|
||||
const allowedWildcardClassesGlob = allowedClassesGlobMap['*'];
|
||||
const allowedClassesGlobs = [
|
||||
allowedSpecificClassesGlob,
|
||||
allowedWildcardClassesGlob
|
||||
]
|
||||
.concat(allowedSpecificClassesRegex)
|
||||
.filter(function (t) {
|
||||
return t;
|
||||
});
|
||||
if (allowedSpecificClasses && allowedWildcardClasses) {
|
||||
// ABS UPDATE: classes and wildcard classes not necessary now
|
||||
// value = filterClasses(value, deepmerge(allowedSpecificClasses, allowedWildcardClasses), allowedClassesGlobs);
|
||||
} else {
|
||||
value = filterClasses(value, allowedSpecificClasses || allowedWildcardClasses, allowedClassesGlobs);
|
||||
}
|
||||
if (!value.length) {
|
||||
delete frame.attribs[a];
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (a === 'style') {
|
||||
delete frame.attribs[a];
|
||||
|
||||
// ABS UPDATE: Styles not necessary
|
||||
// try {
|
||||
// const abstractSyntaxTree = postcssParse(name + ' {' + value + '}');
|
||||
// const filteredAST = filterCss(abstractSyntaxTree, options.allowedStyles);
|
||||
|
||||
// value = stringifyStyleAttributes(filteredAST);
|
||||
|
||||
// if (value.length === 0) {
|
||||
// delete frame.attribs[a];
|
||||
// return;
|
||||
// }
|
||||
// } catch (e) {
|
||||
// delete frame.attribs[a];
|
||||
// return;
|
||||
// }
|
||||
}
|
||||
result += ' ' + a;
|
||||
if (value && value.length) {
|
||||
result += '="' + escapeHtml(value, true) + '"';
|
||||
}
|
||||
} else {
|
||||
delete frame.attribs[a];
|
||||
}
|
||||
});
|
||||
}
|
||||
if (options.selfClosing.indexOf(name) !== -1) {
|
||||
result += ' />';
|
||||
} else {
|
||||
result += '>';
|
||||
if (frame.innerText && !hasText && !options.textFilter) {
|
||||
result += escapeHtml(frame.innerText);
|
||||
addedText = true;
|
||||
}
|
||||
}
|
||||
if (skip) {
|
||||
result = tempResult + escapeHtml(result);
|
||||
tempResult = '';
|
||||
}
|
||||
},
|
||||
ontext: function (text) {
|
||||
if (skipText) {
|
||||
return;
|
||||
}
|
||||
const lastFrame = stack[stack.length - 1];
|
||||
let tag;
|
||||
|
||||
if (lastFrame) {
|
||||
tag = lastFrame.tag;
|
||||
// If inner text was set by transform function then let's use it
|
||||
text = lastFrame.innerText !== undefined ? lastFrame.innerText : text;
|
||||
}
|
||||
|
||||
if (options.disallowedTagsMode === 'discard' && ((tag === 'script') || (tag === 'style'))) {
|
||||
// htmlparser2 gives us these as-is. Escaping them ruins the content. Allowing
|
||||
// script tags is, by definition, game over for XSS protection, so if that's
|
||||
// your concern, don't allow them. The same is essentially true for style tags
|
||||
// which have their own collection of XSS vectors.
|
||||
result += text;
|
||||
} else {
|
||||
const escaped = escapeHtml(text, false);
|
||||
if (options.textFilter && !addedText) {
|
||||
result += options.textFilter(escaped, tag);
|
||||
} else if (!addedText) {
|
||||
result += escaped;
|
||||
}
|
||||
}
|
||||
if (stack.length) {
|
||||
const frame = stack[stack.length - 1];
|
||||
frame.text += text;
|
||||
}
|
||||
},
|
||||
onclosetag: function (name) {
|
||||
|
||||
if (skipText) {
|
||||
skipTextDepth--;
|
||||
if (!skipTextDepth) {
|
||||
skipText = false;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const frame = stack.pop();
|
||||
if (!frame) {
|
||||
// Do not crash on bad markup
|
||||
return;
|
||||
}
|
||||
skipText = options.enforceHtmlBoundary ? name === 'html' : false;
|
||||
depth--;
|
||||
const skip = skipMap[depth];
|
||||
if (skip) {
|
||||
delete skipMap[depth];
|
||||
if (options.disallowedTagsMode === 'discard') {
|
||||
frame.updateParentNodeText();
|
||||
return;
|
||||
}
|
||||
tempResult = result;
|
||||
result = '';
|
||||
}
|
||||
|
||||
if (transformMap[depth]) {
|
||||
name = transformMap[depth];
|
||||
delete transformMap[depth];
|
||||
}
|
||||
|
||||
if (options.exclusiveFilter && options.exclusiveFilter(frame)) {
|
||||
result = result.substr(0, frame.tagPosition);
|
||||
return;
|
||||
}
|
||||
|
||||
frame.updateParentNodeMediaChildren();
|
||||
frame.updateParentNodeText();
|
||||
|
||||
if (options.selfClosing.indexOf(name) !== -1) {
|
||||
// Already output />
|
||||
if (skip) {
|
||||
result = tempResult;
|
||||
tempResult = '';
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
result += '</' + name + '>';
|
||||
if (skip) {
|
||||
result = tempResult + escapeHtml(result);
|
||||
tempResult = '';
|
||||
}
|
||||
addedText = false;
|
||||
}
|
||||
}, options.parser);
|
||||
parser.write(html);
|
||||
parser.end();
|
||||
|
||||
return result;
|
||||
|
||||
function initializeState() {
|
||||
result = '';
|
||||
depth = 0;
|
||||
stack = [];
|
||||
skipMap = {};
|
||||
transformMap = {};
|
||||
skipText = false;
|
||||
skipTextDepth = 0;
|
||||
}
|
||||
|
||||
function escapeHtml(s, quote) {
|
||||
if (typeof (s) !== 'string') {
|
||||
s = s + '';
|
||||
}
|
||||
if (options.parser.decodeEntities) {
|
||||
s = s.replace(/&/g, '&').replace(/</g, '<').replace(/>/g, '>');
|
||||
if (quote) {
|
||||
s = s.replace(/"/g, '"');
|
||||
}
|
||||
}
|
||||
// TODO: this is inadequate because it will pass `&0;`. This approach
|
||||
// will not work, each & must be considered with regard to whether it
|
||||
// is followed by a 100% syntactically valid entity or not, and escaped
|
||||
// if it is not. If this bothers you, don't set parser.decodeEntities
|
||||
// to false. (The default is true.)
|
||||
s = s.replace(/&(?![a-zA-Z0-9#]{1,20};)/g, '&') // Match ampersands not part of existing HTML entity
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>');
|
||||
if (quote) {
|
||||
s = s.replace(/"/g, '"');
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function naughtyHref(name, href) {
|
||||
// Browsers ignore character codes of 32 (space) and below in a surprising
|
||||
// number of situations. Start reading here:
|
||||
// https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Embedded_tab
|
||||
// eslint-disable-next-line no-control-regex
|
||||
href = href.replace(/[\x00-\x20]+/g, '');
|
||||
// Clobber any comments in URLs, which the browser might
|
||||
// interpret inside an XML data island, allowing
|
||||
// a javascript: URL to be snuck through
|
||||
href = href.replace(/<!--.*?-->/g, '');
|
||||
// Case insensitive so we don't get faked out by JAVASCRIPT #1
|
||||
// Allow more characters after the first so we don't get faked
|
||||
// out by certain schemes browsers accept
|
||||
const matches = href.match(/^([a-zA-Z][a-zA-Z0-9.\-+]*):/);
|
||||
if (!matches) {
|
||||
// Protocol-relative URL starting with any combination of '/' and '\'
|
||||
if (href.match(/^[/\\]{2}/)) {
|
||||
return !options.allowProtocolRelative;
|
||||
}
|
||||
|
||||
// No scheme
|
||||
return false;
|
||||
}
|
||||
const scheme = matches[1].toLowerCase();
|
||||
|
||||
if (has(options.allowedSchemesByTag, name)) {
|
||||
return options.allowedSchemesByTag[name].indexOf(scheme) === -1;
|
||||
}
|
||||
|
||||
return !options.allowedSchemes || options.allowedSchemes.indexOf(scheme) === -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters user input css properties by allowlisted regex attributes.
|
||||
* Modifies the abstractSyntaxTree object.
|
||||
*
|
||||
* @param {object} abstractSyntaxTree - Object representation of CSS attributes.
|
||||
* @property {array[Declaration]} abstractSyntaxTree.nodes[0] - Each object cointains prop and value key, i.e { prop: 'color', value: 'red' }.
|
||||
* @param {object} allowedStyles - Keys are properties (i.e color), value is list of permitted regex rules (i.e /green/i).
|
||||
* @return {object} - The modified tree.
|
||||
*/
|
||||
// function filterCss(abstractSyntaxTree, allowedStyles) {
|
||||
// if (!allowedStyles) {
|
||||
// return abstractSyntaxTree;
|
||||
// }
|
||||
|
||||
// const astRules = abstractSyntaxTree.nodes[0];
|
||||
// let selectedRule;
|
||||
|
||||
// // Merge global and tag-specific styles into new AST.
|
||||
// if (allowedStyles[astRules.selector] && allowedStyles['*']) {
|
||||
// selectedRule = deepmerge(
|
||||
// allowedStyles[astRules.selector],
|
||||
// allowedStyles['*']
|
||||
// );
|
||||
// } else {
|
||||
// selectedRule = allowedStyles[astRules.selector] || allowedStyles['*'];
|
||||
// }
|
||||
|
||||
// if (selectedRule) {
|
||||
// abstractSyntaxTree.nodes[0].nodes = astRules.nodes.reduce(filterDeclarations(selectedRule), []);
|
||||
// }
|
||||
|
||||
// return abstractSyntaxTree;
|
||||
// }
|
||||
|
||||
/**
|
||||
* Extracts the style attributes from an AbstractSyntaxTree and formats those
|
||||
* values in the inline style attribute format.
|
||||
*
|
||||
* @param {AbstractSyntaxTree} filteredAST
|
||||
* @return {string} - Example: "color:yellow;text-align:center !important;font-family:helvetica;"
|
||||
*/
|
||||
function stringifyStyleAttributes(filteredAST) {
|
||||
return filteredAST.nodes[0].nodes
|
||||
.reduce(function (extractedAttributes, attrObject) {
|
||||
extractedAttributes.push(
|
||||
`${attrObject.prop}:${attrObject.value}${attrObject.important ? ' !important' : ''}`
|
||||
);
|
||||
return extractedAttributes;
|
||||
}, [])
|
||||
.join(';');
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters the existing attributes for the given property. Discards any attributes
|
||||
* which don't match the allowlist.
|
||||
*
|
||||
* @param {object} selectedRule - Example: { color: red, font-family: helvetica }
|
||||
* @param {array} allowedDeclarationsList - List of declarations which pass the allowlist.
|
||||
* @param {object} attributeObject - Object representing the current css property.
|
||||
* @property {string} attributeObject.type - Typically 'declaration'.
|
||||
* @property {string} attributeObject.prop - The CSS property, i.e 'color'.
|
||||
* @property {string} attributeObject.value - The corresponding value to the css property, i.e 'red'.
|
||||
* @return {function} - When used in Array.reduce, will return an array of Declaration objects
|
||||
*/
|
||||
function filterDeclarations(selectedRule) {
|
||||
return function (allowedDeclarationsList, attributeObject) {
|
||||
// If this property is allowlisted...
|
||||
if (has(selectedRule, attributeObject.prop)) {
|
||||
const matchesRegex = selectedRule[attributeObject.prop].some(function (regularExpression) {
|
||||
return regularExpression.test(attributeObject.value);
|
||||
});
|
||||
|
||||
if (matchesRegex) {
|
||||
allowedDeclarationsList.push(attributeObject);
|
||||
}
|
||||
}
|
||||
return allowedDeclarationsList;
|
||||
};
|
||||
}
|
||||
|
||||
function filterClasses(classes, allowed, allowedGlobs) {
|
||||
if (!allowed) {
|
||||
// The class attribute is allowed without filtering on this tag
|
||||
return classes;
|
||||
}
|
||||
classes = classes.split(/\s+/);
|
||||
return classes.filter(function (clss) {
|
||||
return allowed.indexOf(clss) !== -1 || allowedGlobs.some(function (glob) {
|
||||
return glob.test(clss);
|
||||
});
|
||||
}).join(' ');
|
||||
}
|
||||
}
|
||||
|
||||
// Defaults are accessible to you so that you can use them as a starting point
|
||||
// programmatically if you wish
|
||||
|
||||
const htmlParserDefaults = {
|
||||
decodeEntities: true
|
||||
};
|
||||
sanitizeHtml.defaults = {
|
||||
allowedTags: [
|
||||
// Sections derived from MDN element categories and limited to the more
|
||||
// benign categories.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTML/Element
|
||||
// Content sectioning
|
||||
'address', 'article', 'aside', 'footer', 'header',
|
||||
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hgroup',
|
||||
'main', 'nav', 'section',
|
||||
// Text content
|
||||
'blockquote', 'dd', 'div', 'dl', 'dt', 'figcaption', 'figure',
|
||||
'hr', 'li', 'main', 'ol', 'p', 'pre', 'ul',
|
||||
// Inline text semantics
|
||||
'a', 'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'dfn',
|
||||
'em', 'i', 'kbd', 'mark', 'q',
|
||||
'rb', 'rp', 'rt', 'rtc', 'ruby',
|
||||
's', 'samp', 'small', 'span', 'strong', 'sub', 'sup', 'time', 'u', 'var', 'wbr',
|
||||
// Table content
|
||||
'caption', 'col', 'colgroup', 'table', 'tbody', 'td', 'tfoot', 'th',
|
||||
'thead', 'tr'
|
||||
],
|
||||
disallowedTagsMode: 'discard',
|
||||
allowedAttributes: {
|
||||
a: ['href', 'name', 'target'],
|
||||
// We don't currently allow img itself by default, but
|
||||
// these attributes would make sense if we did.
|
||||
img: ['src', 'srcset', 'alt', 'title', 'width', 'height', 'loading']
|
||||
},
|
||||
// Lots of these won't come up by default because we don't allow them
|
||||
selfClosing: ['img', 'br', 'hr', 'area', 'base', 'basefont', 'input', 'link', 'meta'],
|
||||
// URL schemes we permit
|
||||
allowedSchemes: ['http', 'https', 'ftp', 'mailto', 'tel'],
|
||||
allowedSchemesByTag: {},
|
||||
allowedSchemesAppliedToAttributes: ['href', 'src', 'cite'],
|
||||
allowProtocolRelative: true,
|
||||
enforceHtmlBoundary: false
|
||||
};
|
||||
|
||||
sanitizeHtml.simpleTransform = function (newTagName, newAttribs, merge) {
|
||||
merge = (merge === undefined) ? true : merge;
|
||||
newAttribs = newAttribs || {};
|
||||
|
||||
return function (tagName, attribs) {
|
||||
let attrib;
|
||||
if (merge) {
|
||||
for (attrib in newAttribs) {
|
||||
attribs[attrib] = newAttribs[attrib];
|
||||
}
|
||||
} else {
|
||||
attribs = newAttribs;
|
||||
}
|
||||
|
||||
return {
|
||||
tagName: newTagName,
|
||||
attribs: attribs
|
||||
};
|
||||
};
|
||||
};
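
A brief usage sketch for the vendored sanitizer (the option values here are illustrative, not the ones audiobookshelf passes):

  const sanitizeHtml = require('./server/libs/sanitizeHtml')

  const dirty = '<p onclick="steal()">Episode <script>alert(1)</script><b>notes</b></p>'
  const clean = sanitizeHtml(dirty, {
    allowedTags: ['p', 'b', 'a'],
    allowedAttributes: { a: ['href'] }
  })
  // With the default 'discard' mode the script tag and its content are dropped
  // and the onclick attribute is removed: '<p>Episode <b>notes</b></p>'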

server/libs/uaParserJs.js (new file, 4 lines)
File diff suppressed because one or more lines are too long

@@ -1,11 +1,16 @@
const Path = require('path')
const date = require('date-and-time')
const serverVersion = require('../../package.json').version
const { PlayMethod } = require('../utils/constants')
const PlaybackSession = require('../objects/PlaybackSession')
const DeviceInfo = require('../objects/DeviceInfo')
const Stream = require('../objects/Stream')
const Logger = require('../Logger')
const fs = require('fs-extra')

const uaParserJs = require('../libs/uaParserJs')
const requestIp = require('../libs/requestIp')

class PlaybackSessionManager {
  constructor(db, emitter, clientEmitter) {
    this.db = db

@@ -27,8 +32,21 @@ class PlaybackSessionManager {
    return session ? session.stream : null
  }

  async startSessionRequest(user, libraryItem, episodeId, options, res) {
    const session = await this.startSession(user, libraryItem, episodeId, options)
  getDeviceInfo(req) {
    const ua = uaParserJs(req.headers['user-agent'])
    const ip = requestIp.getClientIp(req)
    const clientDeviceInfo = req.body ? req.body.deviceInfo || null : null // From mobile client

    const deviceInfo = new DeviceInfo()
    deviceInfo.setData(ip, ua, clientDeviceInfo, serverVersion)
    return deviceInfo
  }

  async startSessionRequest(req, res, episodeId) {
    const deviceInfo = this.getDeviceInfo(req)

    const { user, libraryItem, body: options } = req
    const session = await this.startSession(user, deviceInfo, libraryItem, episodeId, options)
    res.json(session.toJSONForClient(libraryItem))
}
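
In rough terms, getDeviceInfo derives the stored fields as follows (the values shown are illustrative):

  // user-agent header -> browserName/browserVersion/osName/osVersion/deviceType (via uaParserJs)
  // requestIp.getClientIp(req) -> ipAddress
  // req.body.deviceInfo (sent by mobile clients) -> clientVersion/manufacturer/model/sdkVersion
  const deviceInfo = this.getDeviceInfo(req)
  // deviceInfo.toJSON() might yield something like:
  // { ipAddress: '203.0.113.7', browserName: 'Firefox', osName: 'Ubuntu', serverVersion: '2.x.x' }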

@@ -84,7 +102,7 @@ class PlaybackSessionManager {
    res.sendStatus(200)
  }

  async startSession(user, libraryItem, episodeId, options) {
  async startSession(user, deviceInfo, libraryItem, episodeId, options) {
    // Close any sessions already open for user
    var userSessions = this.sessions.filter(playbackSession => playbackSession.userId === user.id)
    for (const session of userSessions) {

@@ -99,7 +117,7 @@ class PlaybackSessionManager {
    var userStartTime = 0
    if (userProgress) userStartTime = Number.parseFloat(userProgress.currentTime) || 0
    const newPlaybackSession = new PlaybackSession()
    newPlaybackSession.setData(libraryItem, user, mediaPlayer, episodeId)
    newPlaybackSession.setData(libraryItem, user, mediaPlayer, deviceInfo, userStartTime, episodeId)

    var audioTracks = []
    if (shouldDirectPlay) {

@@ -122,7 +140,6 @@ class PlaybackSessionManager {
      })
    }

    newPlaybackSession.currentTime = userStartTime
    newPlaybackSession.audioTracks = audioTracks

    // Will save on the first sync

server/objects/DeviceInfo.js (new file, 74 lines)

@@ -0,0 +1,74 @@
class DeviceInfo {
  constructor(deviceInfo = null) {
    this.ipAddress = null

    // From User Agent (see: https://www.npmjs.com/package/ua-parser-js)
    this.browserName = null
    this.browserVersion = null
    this.osName = null
    this.osVersion = null
    this.deviceType = null

    // From client
    this.clientVersion = null
    this.manufacturer = null
    this.model = null
    this.sdkVersion = null // Android Only

    this.serverVersion = null

    if (deviceInfo) {
      this.construct(deviceInfo)
    }
  }

  construct(deviceInfo) {
    for (const key in deviceInfo) {
      if (deviceInfo[key] !== undefined && this[key] !== undefined) {
        this[key] = deviceInfo[key]
      }
    }
  }

  toJSON() {
    const obj = {
      ipAddress: this.ipAddress,
      browserName: this.browserName,
      browserVersion: this.browserVersion,
      osName: this.osName,
      osVersion: this.osVersion,
      deviceType: this.deviceType,
      clientVersion: this.clientVersion,
      manufacturer: this.manufacturer,
      model: this.model,
      sdkVersion: this.sdkVersion,
      serverVersion: this.serverVersion
    }
    for (const key in obj) {
      if (obj[key] === null || obj[key] === undefined) {
        delete obj[key]
      }
    }
    return obj
  }

  setData(ip, ua, clientDeviceInfo, serverVersion) {
    this.ipAddress = ip || null

    const uaObj = ua || {}
    this.browserName = uaObj.browser.name || null
    this.browserVersion = uaObj.browser.version || null
    this.osName = uaObj.os.name || null
    this.osVersion = uaObj.os.version || null
    this.deviceType = uaObj.device.type || null

    var cdi = clientDeviceInfo || {}
    this.clientVersion = cdi.clientVersion || null
    this.manufacturer = cdi.manufacturer || null
    this.model = cdi.model || null
    this.sdkVersion = cdi.sdkVersion || null

    this.serverVersion = serverVersion || null
  }
}
module.exports = DeviceInfo
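
A small sketch of building the object directly, mirroring PlaybackSessionManager.getDeviceInfo (ip, ua, clientDeviceInfo and serverVersion are assumed to come from that context):

  const DeviceInfo = require('./server/objects/DeviceInfo')

  const deviceInfo = new DeviceInfo()
  deviceInfo.setData(ip, ua, clientDeviceInfo, serverVersion)
  // toJSON() drops null/undefined fields, so only known values are persisted with the playback session
  const payload = deviceInfo.toJSON()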

@@ -3,11 +3,13 @@ const { getId } = require('../utils/index')
const { PlayMethod } = require('../utils/constants')
const BookMetadata = require('./metadata/BookMetadata')
const PodcastMetadata = require('./metadata/PodcastMetadata')
const DeviceInfo = require('./DeviceInfo')

class PlaybackSession {
  constructor(session) {
    this.id = null
    this.userId = null
    this.libraryId = null
    this.libraryItemId = null
    this.episodeId = null

@@ -21,18 +23,21 @@ class PlaybackSession {

    this.playMethod = null
    this.mediaPlayer = null
    this.deviceInfo = null

    this.date = null
    this.dayOfWeek = null

    this.timeListening = null
    this.startTime = null // media current time at start of playback
    this.currentTime = 0 // Last current time set

    this.startedAt = null
    this.updatedAt = null

    // Not saved in DB
    this.lastSave = 0
    this.audioTracks = []
    this.currentTime = 0
    this.stream = null

    if (session) {

@@ -43,8 +48,8 @@ class PlaybackSession {
  toJSON() {
    return {
      id: this.id,
      sessionType: this.sessionType,
      userId: this.userId,
      libraryId: this.libraryId,
      libraryItemId: this.libraryItemId,
      episodeId: this.episodeId,
      mediaType: this.mediaType,

@@ -56,10 +61,13 @@ class PlaybackSession {
      duration: this.duration,
      playMethod: this.playMethod,
      mediaPlayer: this.mediaPlayer,
      deviceInfo: this.deviceInfo ? this.deviceInfo.toJSON() : null,
      date: this.date,
      dayOfWeek: this.dayOfWeek,
      timeListening: this.timeListening,
      lastUpdate: this.lastUpdate,
      startTime: this.startTime,
      currentTime: this.currentTime,
      startedAt: this.startedAt,
      updatedAt: this.updatedAt
    }
  }

@@ -67,8 +75,8 @@ class PlaybackSession {
  toJSONForClient(libraryItem) {
    return {
      id: this.id,
      sessionType: this.sessionType,
      userId: this.userId,
      libraryId: this.libraryId,
      libraryItemId: this.libraryItemId,
      episodeId: this.episodeId,
      mediaType: this.mediaType,

@@ -80,27 +88,30 @@ class PlaybackSession {
      duration: this.duration,
      playMethod: this.playMethod,
      mediaPlayer: this.mediaPlayer,
      deviceInfo: this.deviceInfo ? this.deviceInfo.toJSON() : null,
      date: this.date,
      dayOfWeek: this.dayOfWeek,
      timeListening: this.timeListening,
      lastUpdate: this.lastUpdate,
      startTime: this.startTime,
      currentTime: this.currentTime,
      startedAt: this.startedAt,
      updatedAt: this.updatedAt,
      audioTracks: this.audioTracks.map(at => at.toJSON()),
      currentTime: this.currentTime,
      libraryItem: libraryItem.toJSONExpanded()
    }
  }

  construct(session) {
    this.id = session.id
    this.sessionType = session.sessionType
    this.userId = session.userId
    this.libraryId = session.libraryId || null
    this.libraryItemId = session.libraryItemId
    this.episodeId = session.episodeId
    this.mediaType = session.mediaType
    this.duration = session.duration
    this.playMethod = session.playMethod
    this.mediaPlayer = session.mediaPlayer || null
    this.deviceInfo = new DeviceInfo(session.deviceInfo)
    this.chapters = session.chapters || []

    this.mediaMetadata = null

@@ -118,6 +129,9 @@ class PlaybackSession {
    this.dayOfWeek = session.dayOfWeek

    this.timeListening = session.timeListening || null
    this.startTime = session.startTime || 0
    this.currentTime = session.currentTime || 0

    this.startedAt = session.startedAt
    this.updatedAt = session.updatedAt || null
  }

@@ -127,9 +141,10 @@ class PlaybackSession {
    return Math.max(0, Math.min(this.currentTime / this.duration, 1))
  }

  setData(libraryItem, user, mediaPlayer, episodeId = null) {
  setData(libraryItem, user, mediaPlayer, deviceInfo, startTime, episodeId = null) {
    this.id = getId('play')
    this.userId = user.id
    this.libraryId = libraryItem.libraryId
    this.libraryItemId = libraryItem.id
    this.episodeId = episodeId
    this.mediaType = libraryItem.mediaType

@@ -146,8 +161,12 @@ class PlaybackSession {
    }

    this.mediaPlayer = mediaPlayer
    this.deviceInfo = deviceInfo || new DeviceInfo()

    this.timeListening = 0
    this.startTime = startTime
    this.currentTime = startTime

    this.date = date.format(new Date(), 'YYYY-MM-DD')
    this.dayOfWeek = date.format(new Date(), 'dddd')
    this.startedAt = Date.now()
@@ -1,4 +1,3 @@
const { stripHtml } = require('string-strip-html')
const { getId } = require('../../utils/index')
const AudioFile = require('../files/AudioFile')
const AudioTrack = require('../files/AudioTrack')

@@ -78,8 +77,7 @@ class PodcastEpisode {
      episodeType: this.episodeType,
      title: this.title,
      subtitle: this.subtitle,
      // description: this.description,
      description: this.descriptionPlain, // Temporary stripping HTML until proper cleaning is implemented
      description: this.description,
      enclosure: this.enclosure ? { ...this.enclosure } : null,
      pubDate: this.pubDate,
      audioFile: this.audioFile.toJSON(),

@@ -108,10 +106,6 @@ class PodcastEpisode {
    if (this.episode) return `${this.episode} - ${this.title}`
    return this.title
  }
  get descriptionPlain() {
    if (!this.description) return ''
    return stripHtml(this.description).result
  }

  setData(data, index = 1) {
    this.id = getId('ep')
@@ -224,18 +224,10 @@ class Podcast {
    this.episodes.push(pe)
  }

  setEpisodeOrder(episodeIds) {
    episodeIds.reverse() // episode Ids will already be in descending order
    this.episodes = this.episodes.map(ep => {
      var indexOf = episodeIds.findIndex(id => id === ep.id)
      ep.index = indexOf + 1
      return ep
    })
    this.episodes.sort((a, b) => b.index - a.index)
  }

  reorderEpisodes() {
    var hasUpdates = false

    // TODO: Sort by published date
    this.episodes = naturalSort(this.episodes).asc((ep) => ep.bestFilename)
    for (let i = 0; i < this.episodes.length; i++) {
      if (this.episodes[i].index !== (i + 1)) {
@@ -1,16 +1,16 @@
const axios = require('axios')
const { stripHtml } = require('string-strip-html')
const htmlSanitizer = require('../utils/htmlSanitizer')
const Logger = require('../Logger')

class Audible {
  constructor() { }

  cleanResult(item) {
    var { title, subtitle, asin, authors, narrators, publisherName, summary, releaseDate, image, genres, seriesPrimary, seriesSecondary, language } = item;
    var { title, subtitle, asin, authors, narrators, publisherName, summary, releaseDate, image, genres, seriesPrimary, seriesSecondary, language } = item

    var series = []
    if(seriesPrimary) series.push(seriesPrimary)
    if(seriesSecondary) series.push(seriesSecondary)
    if (seriesPrimary) series.push(seriesPrimary)
    if (seriesSecondary) series.push(seriesSecondary)

    var genresFiltered = genres ? genres.filter(g => g.type == "genre") : []
    var tagsFiltered = genres ? genres.filter(g => g.type == "tag") : []

@@ -22,12 +22,12 @@ class Audible {
      narrator: narrators ? narrators.map(({ name }) => name).join(', ') : null,
      publisher: publisherName,
      publishedYear: releaseDate ? releaseDate.split('-')[0] : null,
      description: summary ? stripHtml(summary).result : null,
      description: summary ? htmlSanitizer.stripAllTags(summary) : null,
      cover: image,
      asin,
      genres: genresFiltered.length > 0 ? genresFiltered.map(({ name }) => name).join(', ') : null,
      tags: tagsFiltered.length > 0 ? tagsFiltered.map(({ name }) => name).join(', ') : null,
      series: series != [] ? series.map(({name, position}) => ({ series: name, volumeNumber: position })) : null,
      series: series != [] ? series.map(({ name, position }) => ({ series: name, volumeNumber: position })) : null,
      language: language ? language.charAt(0).toUpperCase() + language.slice(1) : null
    }
}
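
A sketch of the swap from string-strip-html to the project's htmlSanitizer helper (the input string and the exact outputs are illustrative; only the two calls are taken from this diff):

  const htmlSanitizer = require('../utils/htmlSanitizer')

  const summary = '<p>An <b>epic</b> tale.</p>'
  htmlSanitizer.stripAllTags(summary) // plain text, e.g. 'An epic tale.' - used for provider metadata fields
  htmlSanitizer.sanitize(summary)     // sanitized HTML with allowed tags kept - used where formatted descriptions are stored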

@@ -49,17 +49,17 @@ class Audible {
    })
  }

  async search(title, author, asin) {
  async search(title, author, asin) {
    var items
    if(asin) {
    if (asin) {
      items = [await this.asinSearch(asin)]
    }

    if (!items && this.isProbablyAsin(title)) {
      items = [await this.asinSearch(title)]
    }

    if(!items) {
    if (!items) {
      var queryObj = {
        num_results: '10',
        products_sort_by: 'Relevance',
@ -1,6 +1,7 @@
|
|||
const axios = require('axios')
|
||||
const Logger = require('../Logger')
|
||||
const { stripHtml } = require('string-strip-html')
|
||||
const htmlSanitizer = require('../utils/htmlSanitizer')
|
||||
|
||||
class iTunes {
|
||||
constructor() { }
|
||||
|
||||
|
|
@ -64,7 +65,7 @@ class iTunes {
|
|||
artistId: data.artistId,
|
||||
title: data.collectionName,
|
||||
author: data.artistName,
|
||||
description: stripHtml(data.description || '').result,
|
||||
description: htmlSanitizer.stripAllTags(data.description || ''),
|
||||
publishedYear: data.releaseDate ? data.releaseDate.split('-')[0] : null,
|
||||
genres: data.primaryGenreName ? [data.primaryGenreName] : [],
|
||||
cover: this.getCoverArtwork(data)
|
||||
|
|
@ -83,7 +84,8 @@ class iTunes {
|
|||
artistId: data.artistId || null,
|
||||
title: data.collectionName,
|
||||
artistName: data.artistName,
|
||||
description: stripHtml(data.description || '').result,
|
||||
description: htmlSanitizer.sanitize(data.description || ''),
|
||||
descriptionPlain: htmlSanitizer.stripAllTags(data.description || ''),
|
||||
releaseDate: data.releaseDate,
|
||||
genres: data.genres || [],
|
||||
cover: this.getCoverArtwork(data),
|
||||
|
|
|
|||
|
|
@@ -90,8 +90,6 @@ class ApiRouter {
this.router.post('/items/:id/play', LibraryItemController.middleware.bind(this), LibraryItemController.startPlaybackSession.bind(this))
this.router.post('/items/:id/play/:episodeId', LibraryItemController.middleware.bind(this), LibraryItemController.startEpisodePlaybackSession.bind(this))
this.router.patch('/items/:id/tracks', LibraryItemController.middleware.bind(this), LibraryItemController.updateTracks.bind(this))
this.router.patch('/items/:id/episodes', LibraryItemController.middleware.bind(this), LibraryItemController.updateEpisodes.bind(this))
this.router.delete('/items/:id/episode/:episodeId', LibraryItemController.middleware.bind(this), LibraryItemController.removeEpisode.bind(this))
this.router.get('/items/:id/scan', LibraryItemController.middleware.bind(this), LibraryItemController.scan.bind(this))
this.router.get('/items/:id/audio-metadata', LibraryItemController.middleware.bind(this), LibraryItemController.updateAudioFileMetadata.bind(this))
this.router.post('/items/:id/chapters', LibraryItemController.middleware.bind(this), LibraryItemController.updateMediaChapters.bind(this))

@@ -111,7 +109,7 @@ class ApiRouter {
this.router.patch('/users/:id', UserController.update.bind(this))
this.router.delete('/users/:id', UserController.delete.bind(this))

this.router.get('/users/:id/listening-sessions', UserController.getListeningStats.bind(this))
this.router.get('/users/:id/listening-sessions', UserController.getListeningSessions.bind(this))
this.router.get('/users/:id/listening-stats', UserController.getListeningStats.bind(this))

//

@@ -189,6 +187,7 @@ class ApiRouter {
this.router.get('/podcasts/:id/clear-queue', PodcastController.middleware.bind(this), PodcastController.clearEpisodeDownloadQueue.bind(this))
this.router.post('/podcasts/:id/download-episodes', PodcastController.middleware.bind(this), PodcastController.downloadEpisodes.bind(this))
this.router.patch('/podcasts/:id/episode/:episodeId', PodcastController.middleware.bind(this), PodcastController.updateEpisode.bind(this))
this.router.delete('/podcasts/:id/episode/:episodeId', PodcastController.middleware.bind(this), PodcastController.removeEpisode.bind(this))

//
// Misc Routes
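A rough client-side sketch of the episode routes wired up here, using axios; the base URL, port, and bearer token are placeholders, and the PATCH body shape is an assumption for illustration, not taken from this diff:

const axios = require('axios')

const baseUrl = 'http://localhost:13378' // placeholder server address
const headers = { Authorization: 'Bearer <api-token>' } // placeholder token

async function demoEpisodeRoutes(itemId, episodeId) {
  // Start a playback session for a single podcast episode
  await axios.post(`${baseUrl}/api/items/${itemId}/play/${episodeId}`, {}, { headers })

  // Update a podcast episode (body shape assumed for illustration)
  await axios.patch(`${baseUrl}/api/podcasts/${itemId}/episode/${episodeId}`, { title: 'New title' }, { headers })

  // Remove a podcast episode
  await axios.delete(`${baseUrl}/api/podcasts/${itemId}/episode/${episodeId}`, { headers })
}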
@@ -17,7 +17,9 @@ class StaticRouter {
if (!item) return res.status(404).send('Item not found with id ' + req.params.id)

var remainingPath = req.params['0']
var fullPath = Path.join(item.path, remainingPath)
var fullPath = null
if (item.isFile) fullPath = item.path
else fullPath = Path.join(item.path, remainingPath)
res.sendFile(fullPath)
})
}
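The added branch resolves the served path differently for single-file items; a standalone sketch of that resolution with hypothetical item objects:

const Path = require('path')

// Hypothetical items illustrating the two cases handled above
function resolveFullPath(item, remainingPath) {
  return item.isFile ? item.path : Path.join(item.path, remainingPath)
}

console.log(resolveFullPath({ isFile: true, path: '/audiobooks/Book.m4b' }, ''))
// e.g. /audiobooks/Book.m4b
console.log(resolveFullPath({ isFile: false, path: '/audiobooks/Author/Book' }, 'track01.mp3'))
// e.g. /audiobooks/Author/Book/track01.mp3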
@@ -62,7 +62,8 @@ class Scanner {
}

async scanLibraryItem(libraryMediaType, folder, libraryItem) {
var libraryItemData = await getLibraryItemFileData(libraryMediaType, folder, libraryItem.path, this.db.serverSettings)
// TODO: Support for single media item
var libraryItemData = await getLibraryItemFileData(libraryMediaType, folder, libraryItem.path, false, this.db.serverSettings)
if (!libraryItemData) {
return ScanResult.NOTHING
}

@@ -499,7 +500,11 @@ class Scanner {
continue;
}
var relFilePaths = folderGroups[folderId].fileUpdates.map(fileUpdate => fileUpdate.relPath)
var fileUpdateGroup = groupFilesIntoLibraryItemPaths(relFilePaths, true)
var fileUpdateGroup = groupFilesIntoLibraryItemPaths(library.mediaType, relFilePaths)
if (!Object.keys(fileUpdateGroup).length) {
Logger.info(`[Scanner] No important changes to scan for in folder "${folderId}"`)
continue;
}
var folderScanResults = await this.scanFolderUpdates(library, folder, fileUpdateGroup)
Logger.debug(`[Scanner] Folder scan results`, folderScanResults)
}

@@ -513,6 +518,8 @@ class Scanner {
// Test Case: Moving audio files from library item folder to author folder should trigger a re-scan of the item
var updateGroup = { ...fileUpdateGroup }
for (const itemDir in updateGroup) {
if (itemDir == fileUpdateGroup[itemDir]) continue; // Media in root path

var itemDirNestedFiles = fileUpdateGroup[itemDir].filter(b => b.includes('/'))
if (!itemDirNestedFiles.length) continue;

@@ -582,7 +589,8 @@ class Scanner {
}

Logger.debug(`[Scanner] Folder update group must be a new item "${itemDir}" in library "${library.name}"`)
var newLibraryItem = await this.scanPotentialNewLibraryItem(library.mediaType, folder, fullPath)
var isSingleMediaItem = itemDir === fileUpdateGroup[itemDir]
var newLibraryItem = await this.scanPotentialNewLibraryItem(library.mediaType, folder, fullPath, isSingleMediaItem)
if (newLibraryItem) {
await this.createNewAuthorsAndSeries(newLibraryItem)
await this.db.insertLibraryItem(newLibraryItem)

@@ -594,8 +602,8 @@ class Scanner {
return itemGroupingResults
}

async scanPotentialNewLibraryItem(libraryMediaType, folder, fullPath) {
var libraryItemData = await getLibraryItemFileData(libraryMediaType, folder, fullPath, this.db.serverSettings)
async scanPotentialNewLibraryItem(libraryMediaType, folder, fullPath, isSingleMediaItem = false) {
var libraryItemData = await getLibraryItemFileData(libraryMediaType, folder, fullPath, isSingleMediaItem, this.db.serverSettings)
if (!libraryItemData) return null
var serverSettings = this.db.serverSettings
return this.scanNewLibraryItem(libraryItemData, libraryMediaType, serverSettings.scannerPreferAudioMetadata, serverSettings.scannerPreferOpfMetadata, serverSettings.scannerFindCovers)
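The single-media-item flag is derived from the shape of the update group: a group whose key equals its value is a media file sitting in the folder root. A small illustration with plain data, outside the Scanner class:

// fileUpdateGroup maps an item directory to its files, or a root file to itself
const fileUpdateGroup = {
  'Author/Title': ['track01.mp3', 'track02.mp3'],
  'Standalone Book.m4b': 'Standalone Book.m4b'
}

for (const itemDir in fileUpdateGroup) {
  const isSingleMediaItem = itemDir === fileUpdateGroup[itemDir]
  console.log(itemDir, '-> isSingleMediaItem:', isSingleMediaItem)
}
// Author/Title -> isSingleMediaItem: false
// Standalone Book.m4b -> isSingleMediaItem: true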
28 server/utils/htmlSanitizer.js (new file)

@@ -0,0 +1,28 @@
const sanitizeHtml = require('../libs/sanitizeHtml')

function sanitize(html) {
const sanitizerOptions = {
allowedTags: [
'p', 'ol', 'ul', 'li', 'a', 'strong', 'em', 'del'
],
disallowedTagsMode: 'discard',
allowedAttributes: {
a: ['href', 'name', 'target']
},
allowedSchemes: ['https'],
allowProtocolRelative: false
}

return sanitizeHtml(html, sanitizerOptions)
}
module.exports.sanitize = sanitize

function stripAllTags(html) {
const sanitizerOptions = {
allowedTags: [],
disallowedTagsMode: 'discard'
}

return sanitizeHtml(html, sanitizerOptions)
}
module.exports.stripAllTags = stripAllTags
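A quick usage sketch of the two helpers added here; the require path is shown as it would be used from another module under server/ (e.g. a provider) and the outputs are indicative only:

const htmlSanitizer = require('../utils/htmlSanitizer') // path as used from server/providers

const html = '<p>Narrated by <strong>Jane Doe</strong> <script>alert(1)</script><a href="https://example.com">More</a></p>'

// Keeps only the small tag whitelist (p, ol, ul, li, a, strong, em, del) and https links
console.log(htmlSanitizer.sanitize(html))
// e.g. <p>Narrated by <strong>Jane Doe</strong> <a href="https://example.com">More</a></p>

// Strips every tag, leaving plain text
console.log(htmlSanitizer.stripAllTags(html))
// e.g. Narrated by Jane Doe More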
@@ -1,5 +1,5 @@
const { xmlToJSON } = require('./index')
const { stripHtml } = require("string-strip-html")
const htmlSanitizer = require('./htmlSanitizer')

function parseCreators(metadata) {
if (!metadata['dc:creator']) return null

@@ -57,8 +57,7 @@ function fetchDescription(metadata) {
// check if description is HTML or plain text. only plain text allowed
// calibre stores < and > as &lt; and &gt;
description = description.replace(/&lt;/g, '<').replace(/&gt;/g, '>')
if (description.match(/<!DOCTYPE html>|<\/?\s*[a-z-][^>]*\s*>|(\&(?:[\w\d]+|#\d+|#x[a-f\d]+);)/)) return stripHtml(description).result
return description
return htmlSanitizer.stripAllTags(description)
}

function fetchGenres(metadata) {
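A worked illustration of the description handling above: calibre-style OPF descriptions arrive with escaped angle brackets, so they are unescaped first and then stripped of tags. The helper require path is an assumption for a standalone run from server/utils:

const htmlSanitizer = require('./htmlSanitizer') // path as used from server/utils

let description = '&lt;p&gt;A &lt;em&gt;classic&lt;/em&gt; mystery.&lt;/p&gt;'
// Undo calibre's escaping, as done above
description = description.replace(/&lt;/g, '<').replace(/&gt;/g, '>')
console.log(htmlSanitizer.stripAllTags(description))
// e.g. A classic mystery.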
@@ -1,6 +1,6 @@
const Logger = require('../Logger')
const { xmlToJSON } = require('./index')
const { stripHtml } = require('string-strip-html')
const htmlSanitizer = require('../utils/htmlSanitizer')

function extractFirstArrayItem(json, key) {
if (!json[key] || !json[key].length) return null

@@ -55,8 +55,9 @@ function extractPodcastMetadata(channel) {
}

if (channel['description']) {
metadata.description = extractFirstArrayItem(channel, 'description')
metadata.descriptionPlain = stripHtml(metadata.description || '').result
const rawDescription = extractFirstArrayItem(channel, 'description') || ''
metadata.description = htmlSanitizer.sanitize(rawDescription)
metadata.descriptionPlain = htmlSanitizer.stripAllTags(rawDescription)
}

var arrayFields = ['title', 'language', 'itunes:explicit', 'itunes:author', 'pubDate', 'link']

@@ -80,9 +81,17 @@ function extractEpisodeData(item) {
}
}

// Full description with html
if (item['content:encoded']) {
const rawDescription = (extractFirstArrayItem(item, 'content:encoded') || '').trim()
episode.description = htmlSanitizer.sanitize(rawDescription)
}

// Supposed to be the plaintext description but not always followed
if (item['description']) {
episode.description = extractFirstArrayItem(item, 'description')
episode.descriptionPlain = stripHtml(episode.description || '').result
const rawDescription = extractFirstArrayItem(item, 'description') || ''
if (!episode.description) episode.description = htmlSanitizer.sanitize(rawDescription)
episode.descriptionPlain = htmlSanitizer.stripAllTags(rawDescription)
}

var arrayFields = ['title', 'pubDate', 'itunes:episodeType', 'itunes:season', 'itunes:episode', 'itunes:author', 'itunes:duration', 'itunes:explicit', 'itunes:subtitle']
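The net effect of the two blocks above: content:encoded (full HTML) wins for episode.description, while the plain description element always feeds descriptionPlain. A small sketch with a fake parsed item shaped like the xmlToJSON output used in this file:

const htmlSanitizer = require('../utils/htmlSanitizer') // as required at the top of this file

// Shape mimics the parsed feed item: every field is an array
const item = {
  'content:encoded': ['<p>Full <strong>show notes</strong> here.</p>'],
  description: ['Short plain summary.']
}

const episode = {}
if (item['content:encoded']) {
  episode.description = htmlSanitizer.sanitize(item['content:encoded'][0].trim())
}
if (item['description']) {
  const raw = item['description'][0] || ''
  if (!episode.description) episode.description = htmlSanitizer.sanitize(raw)
  episode.descriptionPlain = htmlSanitizer.stripAllTags(raw)
}
console.log(episode)
// e.g. description: sanitized HTML from content:encoded, descriptionPlain: 'Short plain summary.'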
@@ -17,11 +17,14 @@ function isMediaFile(mediaType, ext) {
// TODO: Function needs to be re-done
// Input: array of relative file paths
// Output: map of files grouped into potential item dirs
function groupFilesIntoLibraryItemPaths(paths) {
// Step 1: Clean path, Remove leading "/", Filter out files in root dir
function groupFilesIntoLibraryItemPaths(mediaType, paths) {
// Step 1: Clean path, Remove leading "/", Filter out non-media files in root dir
var pathsFiltered = paths.map(path => {
return path.startsWith('/') ? path.slice(1) : path
}).filter(path => Path.parse(path).dir)
}).filter(path => {
let parsedPath = Path.parse(path)
return parsedPath.dir || (mediaType === 'book' && isMediaFile(mediaType, parsedPath.ext))
})

// Step 2: Sort by least number of directories
pathsFiltered.sort((a, b) => {

@@ -33,25 +36,30 @@ function groupFilesIntoLibraryItemPaths(paths) {
// Step 3: Group files in dirs
var itemGroup = {}
pathsFiltered.forEach((path) => {
var dirparts = Path.dirname(path).split('/')
var dirparts = Path.dirname(path).split('/').filter(p => !!p && p !== '.') // dirname returns . if no directory
var numparts = dirparts.length
var _path = ''

// Iterate over directories in path
for (let i = 0; i < numparts; i++) {
var dirpart = dirparts.shift()
_path = Path.posix.join(_path, dirpart)
if (!numparts) {
// Media file in root
itemGroup[path] = path
} else {
// Iterate over directories in path
for (let i = 0; i < numparts; i++) {
var dirpart = dirparts.shift()
_path = Path.posix.join(_path, dirpart)

if (itemGroup[_path]) { // Directory already has files, add file
var relpath = Path.posix.join(dirparts.join('/'), Path.basename(path))
itemGroup[_path].push(relpath)
return
} else if (!dirparts.length) { // This is the last directory, create group
itemGroup[_path] = [Path.basename(path)]
return
} else if (dirparts.length === 1 && /^cd\d{1,3}$/i.test(dirparts[0])) { // Next directory is the last and is a CD dir, create group
itemGroup[_path] = [Path.posix.join(dirparts[0], Path.basename(path))]
return
if (itemGroup[_path]) { // Directory already has files, add file
var relpath = Path.posix.join(dirparts.join('/'), Path.basename(path))
itemGroup[_path].push(relpath)
return
} else if (!dirparts.length) { // This is the last directory, create group
itemGroup[_path] = [Path.basename(path)]
return
} else if (dirparts.length === 1 && /^cd\d{1,3}$/i.test(dirparts[0])) { // Next directory is the last and is a CD dir, create group
itemGroup[_path] = [Path.posix.join(dirparts[0], Path.basename(path))]
return
}
}
}
})
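With the mediaType argument added above, a lone audio file in a book library root now becomes its own group instead of being filtered out. A usage sketch; the module path is assumed and the exact key order may differ:

const { groupFilesIntoLibraryItemPaths } = require('./scandir') // path assumed (server/utils/scandir.js)

const paths = [
  '/Author/Title/track01.mp3',
  '/Author/Title/cover.jpg',
  '/Standalone Book.m4b'
]
console.log(groupFilesIntoLibraryItemPaths('book', paths))
// e.g. {
//   'Standalone Book.m4b': 'Standalone Book.m4b',
//   'Author/Title': [ 'track01.mp3', 'cover.jpg' ]
// }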
@@ -62,9 +70,9 @@ module.exports.groupFilesIntoLibraryItemPaths = groupFilesIntoLibraryItemPaths
// Input: array of relative file items (see recurseFiles)
// Output: map of files grouped into potential library item dirs
function groupFileItemsIntoLibraryItemDirs(mediaType, fileItems) {
// Step 1: Filter out non-media files in root dir (with depth of 0)
// Step 1: Filter out non-book-media files in root dir (with depth of 0)
var itemsFiltered = fileItems.filter(i => {
return i.deep > 0 || isMediaFile(mediaType, i.extension)
return i.deep > 0 || (mediaType === 'book' && isMediaFile(mediaType, i.extension))
})

// Step 2: Separate media files and other files

@@ -128,7 +136,7 @@ function groupFileItemsIntoLibraryItemDirs(mediaType, fileItems) {
}

function cleanFileObjects(libraryItemPath, files) {
return Promise.all(files.map(async (file) => {
return Promise.all(files.map(async(file) => {
var filePath = Path.posix.join(libraryItemPath, file)
var newLibraryFile = new LibraryFile()
await newLibraryFile.setDataFromPath(filePath, file)

@@ -147,16 +155,6 @@ async function scanFolder(libraryMediaType, folder, serverSettings = {}) {
}

var fileItems = await recurseFiles(folderPath)
var basePath = folderPath

const isOpenAudibleFolder = fileItems.find(fi => fi.deep === 0 && fi.name === 'books.json')
if (isOpenAudibleFolder) {
Logger.info(`[scandir] Detected Open Audible Folder, looking in books folder`)
basePath = Path.posix.join(folderPath, 'books')
fileItems = await recurseFiles(basePath)
Logger.debug(`[scandir] ${fileItems.length} files found in books folder`)
}

var libraryItemGrouping = groupFileItemsIntoLibraryItemDirs(libraryMediaType, fileItems)

if (!Object.keys(libraryItemGrouping).length) {

@@ -175,10 +173,10 @@ async function scanFolder(libraryMediaType, folder, serverSettings = {}) {
mediaMetadata: {
title: Path.basename(libraryItemPath, Path.extname(libraryItemPath))
},
path: Path.posix.join(basePath, libraryItemPath),
path: Path.posix.join(folderPath, libraryItemPath),
relPath: libraryItemPath
}
fileObjs = await cleanFileObjects(basePath, [libraryItemPath])
fileObjs = await cleanFileObjects(folderPath, [libraryItemPath])
isFile = true
} else {
libraryItemData = getDataFromMediaDir(libraryMediaType, folderPath, libraryItemPath, serverSettings)

@@ -211,83 +209,16 @@ function getBookDataFromDir(folderPath, relPath, parseSubtitle = false) {
relPath = relPath.replace(/\\/g, '/')
var splitDir = relPath.split('/')

// Audio files will always be in the directory named for the title
var [title, narrators] = getTitleAndNarrator(splitDir.pop())
var folder = splitDir.pop() // Audio files will always be in the directory named for the title
series = (splitDir.length > 1) ? splitDir.pop() : null // If there are at least 2 more directories, next furthest will be the series
author = (splitDir.length > 0) ? splitDir.pop() : null // There could be many more directories, but only the top 3 are used for naming /author/series/title/

var series = null
var author = null
// If there are at least 2 more directories, next furthest will be the series
if (splitDir.length > 1) series = splitDir.pop()
if (splitDir.length > 0) author = splitDir.pop()
// There could be many more directories, but only the top 3 are used for naming /author/series/title/

// If in a series directory check for volume number match
/* ACCEPTS
Book 2 - Title Here - Subtitle Here
Title Here - Subtitle Here - Vol 12
Title Here - volume 9 - Subtitle Here
Vol. 3 Title Here - Subtitle Here
1980 - Book 2-Title Here
Title Here-Volume 999-Subtitle Here
2 - Book Title
100 - Book Title
0.5 - Book Title
*/
var volumeNumber = null
if (series) {
// Added 1.7.1: If title starts with a # that is 3 digits or less (or w/ 2 decimal), then use as volume number
var volumeMatch = title.match(/^(\d{1,3}(?:\.\d{1,2})?) - ./)
if (volumeMatch && volumeMatch.length > 1) {
volumeNumber = volumeMatch[1]
title = title.replace(`${volumeNumber} - `, '')
} else {
// Match volumes with decimal (OLD: /(-? ?)\b((?:Book|Vol.?|Volume) (\d{1,3}))\b( ?-?)/i)
var volumeMatch = title.match(/(-? ?)\b((?:Book|Vol.?|Volume) (\d{0,3}(?:\.\d{1,2})?))\b( ?-?)/i)
if (volumeMatch && volumeMatch.length > 3 && volumeMatch[2] && volumeMatch[3]) {
volumeNumber = volumeMatch[3]
var replaceChunk = volumeMatch[2]

// "1980 - Book 2-Title Here"
// Group 1 would be "- "
// Group 3 would be "-"
// Only remove the first group
if (volumeMatch[1]) {
replaceChunk = volumeMatch[1] + replaceChunk
} else if (volumeMatch[4]) {
replaceChunk += volumeMatch[4]
}
title = title.replace(replaceChunk, '').trim()
}
}

if (volumeNumber != null && !isNaN(volumeNumber)) {
volumeNumber = String(Number(volumeNumber)) // Strips leading zeros
}
}

var publishedYear = null
// If Title is of format 1999 OR (1999) - Title, then use 1999 as publish year
var publishYearMatch = title.match(/^(\(?[0-9]{4}\)?) - (.+)/)
if (publishYearMatch && publishYearMatch.length > 2 && publishYearMatch[1]) {
// Strip parentheses
if (publishYearMatch[1].startsWith('(') && publishYearMatch[1].endsWith(')')) {
publishYearMatch[1] = publishYearMatch[1].slice(1, -1)
}
if (!isNaN(publishYearMatch[1])) {
publishedYear = publishYearMatch[1]
title = publishYearMatch[2]
}
}

// Subtitle can be parsed from the title if user enabled
// Subtitle is everything after " - "
var subtitle = null
if (parseSubtitle && title.includes(' - ')) {
var splitOnSubtitle = title.split(' - ')
title = splitOnSubtitle.shift()
subtitle = splitOnSubtitle.join(' - ')
}
// The folder name may contain various other pieces of metadata; these functions extract it.
var [folder, narrators] = getNarrator(folder)
if (series) { var [folder, sequence] = getSequence(folder) }
var [folder, sequence] = series ? getSequence(folder) : [folder, null]
var [folder, publishedYear] = getPublishedYear(folder)
var [title, subtitle] = parseSubtitle ? getSubtitle(folder) : [folder, null]

return {
mediaMetadata: {
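A worked example of the parsing order above for a three-level book path; the values in the comments are what the module-private helpers defined further down are expected to return, so this is illustrative only:

// Example relPath: 'Terry Pratchett/Discworld/Book 2 - Equal Rites {Celia Imrie}'
var splitDir = 'Terry Pratchett/Discworld/Book 2 - Equal Rites {Celia Imrie}'.split('/')
var folder = splitDir.pop()                                // 'Book 2 - Equal Rites {Celia Imrie}'
var series = (splitDir.length > 1) ? splitDir.pop() : null // 'Discworld'
var author = (splitDir.length > 0) ? splitDir.pop() : null // 'Terry Pratchett'
// getNarrator(folder)                 -> ['Book 2 - Equal Rites', 'Celia Imrie']
// getSequence('Book 2 - Equal Rites') -> ['Equal Rites', '2']   (only attempted when a series dir is present)
// getPublishedYear('Equal Rites')     -> ['Equal Rites', null]
// getSubtitle('Equal Rites')          -> ['Equal Rites', '']    (only when parseSubtitle is enabled)
console.log({ author, series, folder })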
@@ -295,7 +226,7 @@ function getBookDataFromDir(folderPath, relPath, parseSubtitle = false) {
title,
subtitle,
series,
sequence: volumeNumber,
sequence,
publishedYear,
narrators,
},

@@ -304,10 +235,65 @@ function getBookDataFromDir(folderPath, relPath, parseSubtitle = false) {
}
}

function getTitleAndNarrator(folder) {
let pattern = /^(?<title>.*)\{(?<narrators>.*)\} *$/
function getNarrator(folder) {
let pattern = /^(?<title>.*) \{(?<narrators>.*)\}$/
let match = folder.match(pattern)
return match ? [match.groups.title.trimEnd(), match.groups.narrators] : [folder, null]
return match ? [match.groups.title, match.groups.narrators] : [folder, null]
}

function getSequence(folder) {
// Valid ways of including a volume number:
// [
// 'Book 2 - Title - Subtitle',
// 'Title - Subtitle - Vol 12',
// 'Title - volume 9 - Subtitle',
// 'Vol. 3 Title Here - Subtitle',
// '1980 - Book 2 - Title',
// 'Volume 12. Title - Subtitle',
// '100 - Book Title',
// '2 - Book Title',
// '6. Title',
// '0.5 - Book Title'
// ]

// Matches a valid volume string. Also matches a book whose title starts with a 1 to 3 digit number. Will handle that later.
let pattern = /^(?<volumeLabel>vol\.? |volume |book )?(?<sequence>\d{1,3}(?:\.\d{1,2})?)(?<trailingDot>\.?)(?: (?<suffix>.*))?/i

let volumeNumber = null
let parts = folder.split(' - ')
for (let i = 0; i < parts.length; i++) {
let match = parts[i].match(pattern)

// This excludes '101 Dalmations' but includes '101. Dalmations'
if (match && !(match.groups.suffix && !(match.groups.volumeLabel || match.groups.trailingDot))) {
volumeNumber = match.groups.sequence
parts[i] = match.groups.suffix
if (!parts[i]) { parts.splice(i, 1) }
break
}
}

folder = parts.join(' - ')
return [folder, volumeNumber]
}

function getPublishedYear(folder) {
var publishedYear = null

pattern = /^ *\(?([0-9]{4})\)? * - *(.+)/ //Matches #### - title or (####) - title
var match = folder.match(pattern)
if (match) {
publishedYear = match[1]
folder = match[2]
}

return [folder, publishedYear]
}

function getSubtitle(folder) {
// Subtitle is everything after " - "
var splitTitle = folder.split(' - ')
return [splitTitle.shift(), splitTitle.join(' - ')]
}

function getPodcastDataFromDir(folderPath, relPath) {
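Since getSequence and getPublishedYear are not exported, the sketch below restates their patterns verbatim so the examples can run standalone; expected outputs are shown in the comments:

const sequencePattern = /^(?<volumeLabel>vol\.? |volume |book )?(?<sequence>\d{1,3}(?:\.\d{1,2})?)(?<trailingDot>\.?)(?: (?<suffix>.*))?/i

// Same loop as getSequence above, restated so it can run outside the module
function extractSequence(folder) {
  let volumeNumber = null
  let parts = folder.split(' - ')
  for (let i = 0; i < parts.length; i++) {
    let match = parts[i].match(sequencePattern)
    if (match && !(match.groups.suffix && !(match.groups.volumeLabel || match.groups.trailingDot))) {
      volumeNumber = match.groups.sequence
      parts[i] = match.groups.suffix
      if (!parts[i]) parts.splice(i, 1)
      break
    }
  }
  return [parts.join(' - '), volumeNumber]
}

console.log(extractSequence('Book 2 - Equal Rites')) // [ 'Equal Rites', '2' ]
console.log(extractSequence('Equal Rites - Vol. 3')) // [ 'Equal Rites', '3' ]
console.log(extractSequence('6. Title'))             // [ 'Title', '6' ]
console.log(extractSequence('101 Dalmations'))       // [ '101 Dalmations', null ]

// The published-year pattern from getPublishedYear, applied the same way
const yearPattern = /^ *\(?([0-9]{4})\)? * - *(.+)/
console.log('(1987) - Equal Rites'.match(yearPattern).slice(1, 3)) // [ '1987', 'Equal Rites' ]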
@@ -335,14 +321,34 @@ function getDataFromMediaDir(libraryMediaType, folderPath, relPath, serverSettin
}

// Called from Scanner.js
async function getLibraryItemFileData(libraryMediaType, folder, libraryItemPath, serverSettings = {}) {
var fileItems = await recurseFiles(libraryItemPath)

async function getLibraryItemFileData(libraryMediaType, folder, libraryItemPath, isSingleMediaItem, serverSettings = {}) {
libraryItemPath = libraryItemPath.replace(/\\/g, '/')
var folderFullPath = folder.fullPath.replace(/\\/g, '/')

var libraryItemDir = libraryItemPath.replace(folderFullPath, '').slice(1)
var libraryItemData = getDataFromMediaDir(libraryMediaType, folderFullPath, libraryItemDir, serverSettings)
var libraryItemData = {}

var fileItems = []

if (isSingleMediaItem) { // Single media item in root of folder
fileItems = [
{
fullpath: libraryItemPath,
path: libraryItemDir // actually the relPath (only filename here)
}
]
libraryItemData = {
path: libraryItemPath, // full path
relPath: libraryItemDir, // only filename
mediaMetadata: {
title: Path.basename(libraryItemDir, Path.extname(libraryItemDir))
}
}
} else {
fileItems = await recurseFiles(libraryItemPath)
libraryItemData = getDataFromMediaDir(libraryMediaType, folderFullPath, libraryItemDir, serverSettings)
}

var libraryItemDirStats = await getFileTimestampsWithIno(libraryItemData.path)
var libraryItem = {
ino: libraryItemDirStats.ino,
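For a single media file sitting in the folder root, the branch above builds libraryItemData straight from the file path; a standalone illustration with hypothetical paths:

const Path = require('path')

// Hypothetical single-file item: /audiobooks/Standalone Book.m4b inside folder /audiobooks
const libraryItemPath = '/audiobooks/Standalone Book.m4b'
const folderFullPath = '/audiobooks'
const libraryItemDir = libraryItemPath.replace(folderFullPath, '').slice(1) // 'Standalone Book.m4b'

const libraryItemData = {
  path: libraryItemPath, // full path
  relPath: libraryItemDir, // only the filename
  mediaMetadata: {
    title: Path.basename(libraryItemDir, Path.extname(libraryItemDir)) // 'Standalone Book'
  }
}
console.log(libraryItemData)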
@@ -353,6 +359,7 @@ async function getLibraryItemFileData(libraryMediaType, folder, libraryItemPath,
libraryId: folder.libraryId,
path: libraryItemData.path,
relPath: libraryItemData.relPath,
isFile: isSingleMediaItem,
media: {
metadata: libraryItemData.mediaMetadata || null
},