xcity.py: try to fetch a Chinese plot summary and fall back to the original one when none is found; fix the wrong tag count; fix runtime not being shown
@@ -3,16 +3,12 @@ sys.path.append('../')
 import re
 from lxml import etree
 import json
-from bs4 import BeautifulSoup
 from ADC_function import *
+from WebCrawler.storyline import getStoryline
 
-# import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
 
-def getTitle(a):
-    html = etree.fromstring(a, etree.HTMLParser())
+def getTitle(html):
     result = html.xpath('//*[@id="program_detail_title"]/text()')[0]
     return result
 
@@ -43,8 +39,7 @@ def getActorPhoto(browser):
     return o
 
 
-def getStudio(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getStudio(html):
     try:
         result = str(html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[1]/li[4]/a/span/text()')).strip(" ['']")
     except:
@@ -52,20 +47,14 @@ def getStudio(a):
     return result.strip('+').replace("', '", '').replace('"', '')
 
 
-def getRuntime(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getRuntime(html):
     try:
-        result1 = html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[2]/li[3]/text()')[0]
+        x = html.xpath('//span[@class="koumoku" and text()="収録時間"]/../text()')[1].strip()
+        return x
     except:
         return ''
-    try:
-        return re.findall('\d+',result1)[0]
-    except:
-        return ''
 
-def getLabel(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getLabel(html):
     try:
         result = html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[1]/li[5]/a/span/text()')[0]
         return result
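
Note on the runtime fix: the old selector addressed the field by position (ul[2]/li[3]) and then regexed digits out of it, which came back empty whenever the detail list shifted; the new XPath anchors on the 収録時間 label span and reads its parent's text nodes, where index [1] is the value that follows the label. A minimal, self-contained sketch against made-up markup shaped like the xcity detail list (not real page content):

    from lxml import etree

    # Hypothetical fragment mimicking the xcity detail list.
    sample = '''
    <ul>
      <li>
        <span class="koumoku">収録時間</span> 120分
      </li>
    </ul>
    '''
    lx = etree.fromstring(sample, etree.HTMLParser())
    texts = lx.xpath('//span[@class="koumoku" and text()="収録時間"]/../text()')
    # texts[0] is the whitespace before the label, texts[1] is the value after it.
    print(texts[1].strip())  # -> 120分
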
@@ -73,8 +62,7 @@ def getLabel(a):
         return ''
 
 
-def getNum(a):
-    html = etree.fromstring(a, etree.HTMLParser())
+def getNum(html):
     try:
         result = html.xpath('//*[@id="hinban"]/text()')[0]
         return result
@@ -90,8 +78,7 @@ def getYear(getRelease):
     return getRelease
 
 
-def getRelease(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getRelease(html):
     try:
         result = str(html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[1]/li[2]/text()')[1])
     except:
@@ -102,31 +89,22 @@ def getRelease(a):
         return ''
 
 
-def getTag(a):
-    result2=[]
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
-    result1 = html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[1]/li[6]/a/text()')
-    for i in result1:
-        i=i.replace(u'\n','')
-        i=i.replace(u'\t','')
-        if len(i):
-            result2.append(i)
-    return result2
+def getTag(html):
+    x = html.xpath('//span[@class="koumoku" and text()="ジャンル"]/../a[starts-with(@href,"/avod/genre/")]/text()')
+    return [translateTag_to_sc(i.strip()) for i in x if len(i.strip())] if len(x) and len(x[0]) else []
 
 
-def getCover_small(a, index=0):
+def getCover_small(html, index=0):
     # same issue mentioned below,
     # javdb sometime returns multiple results
     # DO NOT just get the firt one, get the one with correct index number
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
     result = html.xpath("//div[@class='item-image fix-scale-cover']/img/@src")[index]
     if not 'https' in result:
         result = 'https:' + result
     return result
 
 
-def getCover(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
+def getCover(html):
     try:
         result = html.xpath('//*[@id="avodDetails"]/div/div[3]/div[1]/p/a/@href')[0]
         return 'https:' + result
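
Note on the tag-count fix: the old code took whatever li[6] contained and only stripped newlines and tabs, so the tag list could pick up unrelated entries. The new XPath keeps only the anchors under the ジャンル label whose href points at /avod/genre/, and each surviving name then goes through translateTag_to_sc (pulled in by the ADC_function star import; assumed here to map a Japanese genre name to Simplified Chinese). A self-contained sketch of the selection step, with made-up markup and the translation call left out:

    from lxml import etree

    # Hypothetical genre row; only the /avod/genre/ links should count as tags.
    sample = '''
    <li>
      <span class="koumoku">ジャンル</span>
      <a href="/avod/genre/?id=1">単体作品</a>
      <a href="/avod/genre/?id=2">ドラマ</a>
      <a href="/avod/other/">その他のリンク</a>
    </li>
    '''
    lx = etree.fromstring(sample, etree.HTMLParser())
    x = lx.xpath('//span[@class="koumoku" and text()="ジャンル"]/../a[starts-with(@href,"/avod/genre/")]/text()')
    print([i.strip() for i in x if len(i.strip())])  # -> ['単体作品', 'ドラマ']
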
@@ -134,8 +112,7 @@ def getCover(htmlcode):
         return ''
 
 
-def getDirector(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getDirector(html):
     try:
         result = html.xpath('//*[@id="program_detail_director"]/text()')[0].replace(u'\n','').replace(u'\t', '')
         return result
@@ -143,19 +120,21 @@ def getDirector(a):
         return ''
 
 
-def getOutline(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
+def getOutline(html, number, title):
+    storyline_site = config.getInstance().storyline_site().split(',')
+    a = set(storyline_site) & {'airav', 'avno1'}
+    if len(a):
+        site = [n for n in storyline_site if n in a]
+        g = getStoryline(number, title, site)
+        if len(g):
+            return g
     try:
-        result = html.xpath('//*[@id="avodDetails"]/div/div[3]/div[2]/div/ul[2]/li[5]/p/text()')[0]
+        x = html.xpath('//h2[@class="title-detail"]/../p[@class="lead"]/text()')[0]
+        return x.replace(getNum(html), '')
     except:
         return ''
-    try:
-        return re.sub('\\\\\w*\d+','',result)
-    except:
-        return result
 
-def getSeries(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
+def getSeries(html):
     try:
         try:
             result = html.xpath("//span[contains(text(),'シリーズ')]/../a/span/text()")[0]
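
Note on the Chinese outline: getOutline now asks getStoryline for a Chinese synopsis first, but only from the configured storyline sites that can supply one (airav/avno1), and it queries them in the order the user configured rather than in set iteration order; the Japanese lead paragraph on the page, with the product number stripped via getNum, is the fallback. The list comprehension is what preserves the configured priority, e.g.:

    # Hypothetical config value; storyline_site normally comes from config.getInstance().
    storyline_site = 'avno1,airav,xcity,amazon'.split(',')
    a = set(storyline_site) & {'airav', 'avno1'}
    site = [n for n in storyline_site if n in a]
    print(site)  # -> ['avno1', 'airav']  (configured order kept, other sites dropped)
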
@@ -198,33 +177,35 @@ def main(number):
     try:
         detail_page, browser = open_by_browser(number)
         url = browser.url
-        newnum = getNum(detail_page).upper()
+        lx = etree.fromstring(detail_page, etree.HTMLParser())
+        newnum = getNum(lx).upper()
         number_up = number.upper()
         if newnum != number_up:
             if newnum == number.replace('-','').upper():
                 newnum = number_up
             else:
                 raise ValueError("xcity.py: number not found")
+        title = getTitle(lx)
         dic = {
             'actor': getActor(browser),
-            'title': getTitle(detail_page),
-            'studio': getStudio(detail_page),
-            'outline': getOutline(detail_page),
-            'runtime': getRuntime(detail_page),
-            'director': getDirector(detail_page),
-            'release': getRelease(detail_page),
+            'title': title,
+            'studio': getStudio(lx),
+            'outline': getOutline(lx, number, title),
+            'runtime': getRuntime(lx),
+            'director': getDirector(lx),
+            'release': getRelease(lx),
             'number': newnum,
-            'cover': getCover(detail_page),
+            'cover': getCover(lx),
             'cover_small': '',
             'extrafanart': getExtrafanart(detail_page),
             'imagecut': 1,
-            'tag': getTag(detail_page),
-            'label': getLabel(detail_page),
-            'year': getYear(getRelease(detail_page)), # str(re.search('\d{4}',getRelease(a)).group()),
+            'tag': getTag(lx),
+            'label': getLabel(lx),
+            'year': getYear(getRelease(lx)), # str(re.search('\d{4}',getRelease(a)).group()),
             # 'actor_photo': getActorPhoto(browser),
             'website': url,
             'source': 'xcity.py',
-            'series': getSeries(detail_page),
+            'series': getSeries(lx),
         }
     except Exception as e:
         if config.getInstance().debug():
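
Note on the main() change: the detail page is now parsed into an lxml element once (lx) and handed to every getter, instead of each getter re-running etree.fromstring on the raw HTML; only getExtrafanart still receives the raw string. A self-contained illustration of the parse-once pattern, using two of this file's selectors on made-up markup:

    from lxml import etree

    # Fake detail page; only the two ids used below matter here.
    sample = '<html><body><h2 id="program_detail_title">Example Title</h2><span id="hinban">ABC-123</span></body></html>'
    lx = etree.fromstring(sample, etree.HTMLParser())  # parse once

    def get_title(html):
        return html.xpath('//*[@id="program_detail_title"]/text()')[0]

    def get_num(html):
        return html.xpath('//*[@id="hinban"]/text()')[0]

    # Both getters reuse the same parsed tree instead of re-parsing the HTML string.
    print(get_title(lx), get_num(lx))  # -> Example Title ABC-123
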