Merge pull request #636 from lededev/double-exception

bugfix
This commit is contained in:
Yoshiko2
2021-11-14 20:16:57 +08:00
committed by GitHub
5 changed files with 11 additions and 18 deletions

View File

@@ -474,18 +474,11 @@ def main():
     check_update(version)
     # Download Mapping Table, parallel version
-    down_map_tab = []
-    actor_xml = Path.home() / '.local' / 'share' / 'avdc' / 'mapping_actor.xml'
-    if not actor_xml.exists():
-        down_map_tab.append((
-            "https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/mapping_actor.xml",
-            actor_xml))
-    info_xml = Path.home() / '.local' / 'share' / 'avdc' / 'mapping_info.xml'
-    if not info_xml.exists():
-        down_map_tab.append((
-            "https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/mapping_info.xml",
-            info_xml))
-    res = parallel_download_files(down_map_tab)
+    def fmd(f):
+        return ('https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/' + f,
+                Path.home() / '.local' / 'share' / 'avdc' / f)
+    map_tab = (fmd('mapping_actor.xml'), fmd('mapping_info.xml'), fmd('c_number.json'))
+    res = parallel_download_files(((k, v) for k, v in map_tab if not v.exists()))
     for i, fp in enumerate(res, start=1):
         if fp and len(fp):
             print(f"[+] [{i}/{len(res)}] Mapping Table Downloaded to {fp}")

View File

@@ -57,8 +57,8 @@ def getCover_small(html):
     result = str(html.xpath('//*[@id="waterfall"]/div/a/div[1]/img/@src')).strip(" ['']")
     return result
 def getTag(html):
-    result = html.xpath('/html/head/meta[@name="keywords"]/@content')[0].split(',')
-    return result
+    x = html.xpath('/html/head/meta[@name="keywords"]/@content')[0].split(',')
+    return [i.strip() for i in x[2:]] if len(x) > 2 else []
 def getSeries(html):
     try:
         result1 = str(html.xpath('//span[contains(text(),"系列:")]/../span[2]/text()')).strip(" ['']")

View File

@@ -14,7 +14,7 @@ def getTitle_fc2com(htmlcode): #获取厂商
     return result
 def getActor_fc2com(htmlcode):
     try:
-        htmtml = etree.fromstring(htmlcode, etree.HTMLParser())
+        html = etree.fromstring(htmlcode, etree.HTMLParser())
         result = html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()')[0]
         return result
     except:

View File

@@ -72,7 +72,7 @@ def getSerise(html): #获取系列
     return str(x[0]) if len(x) else ''
 def getTag(html): # 获取标签
     klist = html.xpath('/html/head/meta[@name="keywords"]/@content')[0].split(',')
-    return klist
+    return klist[1:]
 def getExtrafanart(htmlcode): # 获取剧照
     html_pather = re.compile(r'<div id=\"sample-waterfall\">[\s\S]*?</div></a>\s*?</div>')
     html = html_pather.search(htmlcode)

View File

@@ -34,10 +34,10 @@ def main(number: str):
     )
     soup = BeautifulSoup(result.text, "html.parser")
     lx = html.fromstring(str(soup))
     fanhao_pather = re.compile(r'<a href=".*?".*?><div class="id">(.*?)</div>')
     fanhao = fanhao_pather.findall(result.text)
     if "/?v=jav" in result.url:
         dic = {
             "title": get_title(lx, soup),