Update new library scanner to scan in new books

This commit is contained in:
advplyr 2023-09-01 18:01:17 -05:00
parent 75276f5a44
commit 0ecfdab463
13 changed files with 694 additions and 35 deletions

View file

@ -82,6 +82,11 @@ function getIno(path) {
}
module.exports.getIno = getIno
/**
* Read contents of file
* @param {string} path
* @returns {Promise<string>}
*/
async function readTextFile(path) {
try {
var data = await fs.readFile(path)

View file

@ -24,7 +24,7 @@ const CurrentAbMetadataVersion = 2
/**
 * Split a comma-separated string into an array of trimmed, non-empty, unique values
 * @example "a, b ,a" => ["a", "b"]
 * @param {string} v
 * @returns {string[]} empty array when input is falsy
 */
const commaSeparatedToArray = (v) => {
  if (!v) return []
  // Trim each entry, drop empties, then de-dupe via Set
  // (fix: removed the unreachable duplicate return left over from the diff)
  return [...new Set(v.split(',').map(_v => _v.trim()).filter(_v => _v))]
}
const podcastMetadataMapper = {
@ -401,7 +401,10 @@ function checkArraysChanged(abmetadataArray, mediaArray) {
function parseJsonMetadataText(text) {
try {
const abmetadataData = JSON.parse(text)
if (abmetadataData.metadata?.series?.length) {
if (!abmetadataData.metadata) abmetadataData.metadata = {}
if (abmetadataData.metadata.series?.length) {
abmetadataData.metadata.series = [...new Set(abmetadataData.metadata.series.map(t => t?.trim()).filter(t => t))]
abmetadataData.metadata.series = abmetadataData.metadata.series.map(series => {
let sequence = null
let name = series
@ -418,12 +421,31 @@ function parseJsonMetadataText(text) {
}
})
}
// clean tags & remove dupes
if (abmetadataData.tags?.length) {
abmetadataData.tags = [...new Set(abmetadataData.tags.map(t => t?.trim()).filter(t => t))]
}
// TODO: Clean chapters
if (abmetadataData.chapters?.length) {
}
// clean remove dupes
if (abmetadataData.metadata.authors?.length) {
abmetadataData.metadata.authors = [...new Set(abmetadataData.metadata.authors.map(t => t?.trim()).filter(t => t))]
}
if (abmetadataData.metadata.narrators?.length) {
abmetadataData.metadata.narrators = [...new Set(abmetadataData.metadata.narrators.map(t => t?.trim()).filter(t => t))]
}
if (abmetadataData.metadata.genres?.length) {
abmetadataData.metadata.genres = [...new Set(abmetadataData.metadata.genres.map(t => t?.trim()).filter(t => t))]
}
return abmetadataData
} catch (error) {
Logger.error(`[abmetadataGenerator] Invalid metadata.json JSON`, error)
return null
}
}
module.exports.parseJson = parseJsonMetadataText
function cleanChaptersArray(chaptersArray, mediaTitle) {
const chapters = []

View file

@ -147,10 +147,22 @@ const getTitleParts = (title) => {
return [title, null]
}
/**
 * Strip the sorting prefix from a title, keeping only the sortable part
 * @example "The Good Book" => "Good Book"
 * @param {string} title
 * @returns {string}
 */
module.exports.getTitleIgnorePrefix = (title) => {
  const [titleWithoutPrefix] = getTitleParts(title)
  return titleWithoutPrefix
}
/**
* Put sorting prefix at the end of title
* @example "The Good Book" => "Good Book, The"
* @param {string} title
* @returns {string}
*/
module.exports.getTitlePrefixAtEnd = (title) => {
let [sort, prefix] = getTitleParts(title)
return prefix ? `${sort}, ${prefix}` : title

View file

@ -159,8 +159,8 @@ module.exports.parseOpfMetadataXML = async (xml) => {
}
const creators = parseCreators(metadata)
const authors = (fetchCreators(creators, 'aut') || []).filter(au => au && au.trim())
const narrators = (fetchNarrators(creators, metadata) || []).filter(nrt => nrt && nrt.trim())
const authors = (fetchCreators(creators, 'aut') || []).map(au => au?.trim()).filter(au => au)
const narrators = (fetchNarrators(creators, metadata) || []).map(nrt => nrt?.trim()).filter(nrt => nrt)
const data = {
title: fetchTitle(metadata),
subtitle: fetchSubtitle(metadata),