avsox.py: 优化:完成精简

This commit is contained in:
lededev
2021-10-19 17:08:00 +08:00
parent 8559eea296
commit c3e9ab7957

View File

@@ -3,18 +3,17 @@ sys.path.append('..')
import re import re
from lxml import etree from lxml import etree
import json import json
from bs4 import BeautifulSoup
from ADC_function import * from ADC_function import *
from WebCrawler.storyline import getStoryline from WebCrawler.storyline import getStoryline
# import io # import io
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True) # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
def getActorPhoto(html):
    """Collect actor avatar images from an avsox detail page.

    Args:
        html: parsed lxml document of the movie detail page.

    Returns:
        dict mapping actor name (the span text of each avatar box) to the
        avatar image URL (the src of the img inside the same box).
    """
    photos = {}
    for box in html.xpath('//a[@class="avatar-box"]'):
        img_src = box.find('.//img').attrib['src']
        name = box.find('span').text
        photos[name] = img_src
    return photos
@@ -24,11 +23,11 @@ def getTitle(html):
return result.replace('/', '') return result.replace('/', '')
except: except:
return '' return ''
def getActor(html):
    """Return the list of actor names shown on the detail page.

    Args:
        html: parsed lxml document of the movie detail page.

    Returns:
        list of actor names, one per avatar box, in page order.
    """
    boxes = html.xpath('//a[@class="avatar-box"]')
    return [box.find('span').text for box in boxes]
def getStudio(html): def getStudio(html):
result1 = str(html.xpath('//p[contains(text(),"制作商: ")]/following-sibling::p[1]/a/text()')).strip(" ['']").replace("', '",' ') result1 = str(html.xpath('//p[contains(text(),"制作商: ")]/following-sibling::p[1]/a/text()')).strip(" ['']").replace("', '",' ')
@@ -57,12 +56,9 @@ def getCover(html):
def getCover_small(html):
    """Return the small cover image URL from the search-results page.

    Args:
        html: parsed lxml document of the search-results page.

    Returns:
        the img src string; '' when no result is present.  (The value is
        produced by stringifying the xpath result list and stripping the
        list punctuation, matching the file's established pattern.)
    """
    src_list = html.xpath('//*[@id="waterfall"]/div/a/div[1]/img/@src')
    return str(src_list).strip(" ['']")
def getTag(html):
    """Extract the tag list from the page's <meta name="keywords"> content.

    The comma-separated keywords string is split, and the first two entries
    are dropped (presumably the movie id and title — TODO confirm against a
    live page); only the remaining entries are returned as tags.

    Args:
        html: parsed lxml document of the movie detail page.

    Returns:
        list of tag strings; [] when fewer than three keywords exist.
    """
    keywords = html.xpath('/html/head/meta[@name="keywords"]/@content')[0]
    parts = keywords.split(',')
    if len(parts) > 2:
        return parts[2:]
    return []
def getSeries(html): def getSeries(html):
try: try:
result1 = str(html.xpath('//span[contains(text(),"系列:")]/../span[2]/text()')).strip(" ['']") result1 = str(html.xpath('//span[contains(text(),"系列:")]/../span[2]/text()')).strip(" ['']")
@@ -74,45 +70,42 @@ def main(number):
html = get_html('https://tellme.pw/avsox') html = get_html('https://tellme.pw/avsox')
site = etree.HTML(html).xpath('//div[@class="container"]/div/a/@href')[0] site = etree.HTML(html).xpath('//div[@class="container"]/div/a/@href')[0]
a = get_html(site + '/cn/search/' + number) a = get_html(site + '/cn/search/' + number)
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text() html = etree.fromstring(a, etree.HTMLParser())
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']") result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
if result1 == '' or result1 == 'null' or result1 == 'None': if result1 == '' or result1 == 'null' or result1 == 'None':
a = get_html(site + '/cn/search/' + number.replace('-', '_')) a = get_html(site + '/cn/search/' + number.replace('-', '_'))
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text() html = etree.fromstring(a, etree.HTMLParser())
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']") result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
if result1 == '' or result1 == 'null' or result1 == 'None': if result1 == '' or result1 == 'null' or result1 == 'None':
a = get_html(site + '/cn/search/' + number.replace('_', '')) a = get_html(site + '/cn/search/' + number.replace('_', ''))
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text() html = etree.fromstring(a, etree.HTMLParser())
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']") result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
web = get_html("https:" + result1) detail = get_html("https:" + result1)
soup = BeautifulSoup(web, 'lxml') lx = etree.fromstring(detail, etree.HTMLParser())
web = etree.fromstring(web, etree.HTMLParser())
info = str(soup.find(attrs={'class': 'row movie'}))
info = etree.fromstring(info, etree.HTMLParser())
try: try:
new_number = getNum(info) new_number = getNum(lx)
if new_number.upper() != number.upper(): if new_number.upper() != number.upper():
raise ValueError('number not found') raise ValueError('number not found')
title = getTitle(web).strip(getNum(web)) title = getTitle(lx).strip(new_number)
dic = { dic = {
'actor': getActor(soup), 'actor': getActor(lx),
'title': title, 'title': title,
'studio': getStudio(info), 'studio': getStudio(lx),
'outline': getStoryline(number, title), 'outline': getStoryline(number, title),
'runtime': getRuntime(info), 'runtime': getRuntime(lx),
'director': '', # 'director': '', #
'release': getRelease(info), 'release': getRelease(lx),
'number': new_number, 'number': new_number,
'cover': getCover(web), 'cover': getCover(lx),
'cover_small': getCover_small(html), 'cover_small': getCover_small(html),
'imagecut': 3, 'imagecut': 3,
'tag': getTag(soup), 'tag': getTag(lx),
'label': getLabel(info), 'label': getLabel(lx),
'year': getYear(getRelease(info)), # str(re.search('\d{4}',getRelease(a)).group()), 'year': getYear(getRelease(lx)),
'actor_photo': getActorPhoto(soup), 'actor_photo': getActorPhoto(lx),
'website': "https:" + result1, 'website': "https:" + result1,
'source': 'avsox.py', 'source': 'avsox.py',
'series': getSeries(info), 'series': getSeries(lx),
} }
except Exception as e: except Exception as e:
if config.getInstance().debug(): if config.getInstance().debug():