WebCrawler: switch every scraper to config.getInstance() and untangle the love-hate relationship between airav.py, javbus.py, and javdb.py
@@ -134,6 +134,14 @@ def get_data_from_json(file_number, conf: config.Config): # 从JSON返回元数
         print('[-]Movie Number not found!')
         return None

+    # 增加number严格判断,避免提交任何number,总是返回"本橋実来 ADZ335",这种返回number不一致的数据源故障
+    # 目前选用number命名规则是javdb.com Domain Creation Date: 2013-06-19T18:34:27Z
+    # 然而也可以跟进关注其它命名规则例如airav.wiki Domain Creation Date: 2019-08-28T07:18:42.0Z
+    # 如果将来javdb.com命名规则下不同Studio出现同名碰撞导致无法区分,可考虑更换规则,更新相应的number分析和抓取代码。
+    if str(json_data.get('number')).upper() != file_number.upper():
+        print('[-]Movie number has changed! [{}]->[{}]'.format(file_number, str(json_data.get('number'))))
+        return None
+
     # ================================================网站规则添加结束================================================

     title = json_data.get('title')
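The strict number check added above guards against a source that answers with metadata for a different movie than the one requested. A minimal standalone sketch of the same guard (the names json_data and file_number follow the hunk above; returning None mirrors the early exit in get_data_from_json):

def number_matches(json_data, file_number):
    # Compare the scraped number with the requested one, case-insensitively.
    scraped = str(json_data.get('number'))
    if scraped.upper() != file_number.upper():
        print('[-]Movie number has changed! [{}]->[{}]'.format(file_number, scraped))
        return False
    return True

# Example: a faulty source that always answers "ADZ335" is rejected.
# number_matches({'number': 'ADZ335'}, 'ABP-123')  -> False, so the caller returns None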
@@ -225,6 +233,8 @@ def get_data_from_json(file_number, conf: config.Config): # 从JSON返回元数
     studio = studio.replace('エムズビデオグループ','M’s Video Group')
     studio = studio.replace('ミニマム','Minimum')
     studio = studio.replace('ワープエンタテインメント','WAAP Entertainment')
+    studio = studio.replace('pacopacomama,パコパコママ','pacopacomama')
+    studio = studio.replace('パコパコママ','pacopacomama')
     studio = re.sub('.*/妄想族','妄想族',studio)
     studio = studio.replace('/',' ')
     # === 替换Studio片假名 END
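The order of the two new replace() calls matters: the composite form 'pacopacomama,パコパコママ' must be handled before the bare katakana, otherwise the result would be 'pacopacomama,pacopacomama'. A tiny sketch:

def normalize_studio(studio):
    # Longer composite form first, then the bare katakana.
    studio = studio.replace('pacopacomama,パコパコママ', 'pacopacomama')
    studio = studio.replace('パコパコママ', 'pacopacomama')
    return studio

# normalize_studio('pacopacomama,パコパコママ')  -> 'pacopacomama'
# normalize_studio('パコパコママ')                -> 'pacopacomama'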
@@ -6,6 +6,7 @@ from lxml import etree#need install
 from bs4 import BeautifulSoup#need install
 import json
 from ADC_function import *
+from WebCrawler import javbus

 '''
 API
@@ -17,95 +18,94 @@ API
 host = 'https://www.airav.wiki'

 # airav这个网站没有演员图片,所以直接使用javbus的图
-def getActorPhoto(htmlcode): #//*[@id="star_qdt"]/li/a/img
-    soup = BeautifulSoup(htmlcode, 'lxml')
-    a = soup.find_all(attrs={'class': 'star-name'})
-    d={}
-    for i in a:
-        l=i.a['href']
-        t=i.get_text()
-        html = etree.fromstring(get_html(l), etree.HTMLParser())
-        p=urljoin("https://www.javbus.com",
-                  str(html.xpath('//*[@id="waterfall"]/div[1]/div/div[1]/img/@src')).strip(" ['']"))
-        p2={t:p}
-        d.update(p2)
-    return d
-
-def getTitle(htmlcode): #获取标题
-    doc = pq(htmlcode)
-    # h5:first-child定位第一个h5标签,妈的找了好久才找到这个语法
-    title = str(doc('div.d-flex.videoDataBlock h5.d-none.d-md-block:nth-child(2)').text()).replace(' ', '-')
-    try:
-        title2 = re.sub('n\d+-','',title)
-        return title2
-    except:
-        return title
-
-def getStudio(htmlcode): #获取厂商 已修改
-    html = etree.fromstring(htmlcode,etree.HTMLParser())
-    # 如果记录中冇导演,厂商排在第4位
-    if '製作商:' == str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/span/text()')).strip(" ['']"):
-        result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/a/text()')).strip(" ['']")
-    # 如果记录中有导演,厂商排在第5位
-    elif '製作商:' == str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[5]/span/text()')).strip(" ['']"):
-        result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[5]/a/text()')).strip(" ['']")
-    else:
-        result = ''
-    return result
-
-def getYear(htmlcode): #获取年份
-    html = etree.fromstring(htmlcode,etree.HTMLParser())
-    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
-    return result
-
-def getCover(htmlcode): #获取封面链接
-    doc = pq(htmlcode)
-    image = doc('a.bigImage')
-    return urljoin("https://www.javbus.com", image.attr('href'))
-
-def getRelease(htmlcode): #获取出版日期
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
-    return result
-
-def getRuntime(htmlcode): #获取分钟 已修改
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[3]/text()')).strip(" ['']分鐘")
-    return result
-
-def getActor(htmlcode): #获取女优
-    b=[]
-    soup=BeautifulSoup(htmlcode,'lxml')
-    a=soup.find_all(attrs={'class':'star-name'})
-    for i in a:
-        b.append(i.get_text())
-    return b
-
-def getNum(htmlcode): #获取番号
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')).strip(" ['']")
-    return result
-
-def getDirector(htmlcode): #获取导演 已修改
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    if '導演:' == str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/span/text()')).strip(" ['']"):
-        result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/a/text()')).strip(" ['']")
-    else:
-        result = '' # 记录中有可能没有导演数据
-    return result
-
-def getOutline(htmlcode): #获取演员
+def getActorPhoto(javbus_json):
+    result = javbus_json.get('actor_photo')
+    if isinstance(result, dict) and len(result):
+        return result
+    return ''
+
+def getTitle(htmlcode): #获取标题
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    title = str(html.xpath('/html/head/title/text()')[0])
+    result = str(re.findall('](.*?)- AIRAV-WIKI', title)[0]).strip()
+    return result
+
+def getStudio(htmlcode, javbus_json): #获取厂商 已修改
+    # javbus如果有数据以它为准
+    result = javbus_json.get('studio')
+    if isinstance(result, str) and len(result):
+        return result
+    html = etree.fromstring(htmlcode,etree.HTMLParser())
+    return str(html.xpath('//a[contains(@href,"?video_factory=")]/text()')).strip(" ['']")
+
+def getYear(htmlcode, javbus_json): #获取年份
+    result = javbus_json.get('year')
+    if isinstance(result, str) and len(result):
+        return result
+    release = getRelease(htmlcode, javbus_json)
+    if len(release) != len('2000-01-01'):
+        return ''
+    return release[:4]
+
+def getCover(htmlcode, javbus_json): #获取封面图片
+    result = javbus_json.get('cover')
+    if isinstance(result, str) and len(result):
+        return result
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    return html.xpath('//img[contains(@src,"/storage/big_pic/")]/@src')[0]
+
+def getRelease(htmlcode, javbus_json): #获取出版日期
+    result = javbus_json.get('release')
+    if isinstance(result, str) and len(result):
+        return result
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    try:
+        result = re.search(r'\d{4}-\d{2}-\d{2}', str(html.xpath('//li[contains(text(),"發片日期")]/text()'))).group()
+        return result
+    except:
+        return ''
+
+def getRuntime(javbus_json): #获取播放时长
+    result = javbus_json.get('runtime')
+    if isinstance(result, str) and len(result):
+        return result
+    return ''
+
+# airav女优数据库较多日文汉字姓名,javbus较多日语假名,因此airav优先
+def getActor(htmlcode, javbus_json): #获取女优
+    b=[]
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    a = html.xpath('//ul[@class="videoAvstarList"]/li/a[starts-with(@href,"/idol/")]/text()')
+    for v in a:
+        v = v.strip()
+        if len(v):
+            b.append(v)
+    if len(b):
+        return b
+    result = javbus_json.get('actor')
+    if isinstance(result, list) and len(result):
+        return result
+    return []
+
+def getNum(htmlcode, javbus_json): #获取番号
+    result = javbus_json.get('number')
+    if isinstance(result, str) and len(result):
+        return result
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    title = str(html.xpath('/html/head/title/text()')[0])
+    result = str(re.findall('^\[(.*?)]', title)[0])
+    return result
+
+def getDirector(javbus_json): #获取导演 已修改
+    result = javbus_json.get('director')
+    if isinstance(result, str) and len(result):
+        return result
+    return ''
+
+def getOutline(htmlcode): #获取概述
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     try:
-        result = html.xpath("string(//div[@class='d-flex videoDataBlock']/div[@class='synopsis']/p)").replace('\n','')
+        result = html.xpath("string(//div[@class='d-flex videoDataBlock']/div[@class='synopsis']/p)").replace('\n','').strip()
         return result
     except:
         return ''
-def getSerise(htmlcode): #获取系列 已修改
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    # 如果记录中冇导演,系列排在第6位
-    if '系列:' == str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[6]/span/text()')).strip(" ['']"):
-        result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[6]/a/text()')).strip(" ['']")
-    # 如果记录中有导演,系列排在第7位
-    elif '系列:' == str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/span/text()')).strip(" ['']"):
-        result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/a/text()')).strip(" ['']")
-    else:
-        result = ''
-    return result
+
+def getSerise(javbus_json): #获取系列 已修改
+    result = javbus_json.get('series')
+    if isinstance(result, str) and len(result):
+        return result
+    return ''
+
 def getTag(htmlcode): # 获取标签
     tag = []
     soup = BeautifulSoup(htmlcode, 'lxml')
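Every rewritten getter above follows the same pattern: trust the field from the javbus JSON when it is present and non-empty, and only then fall back to parsing the airav page itself. A hedged sketch of that pattern as a generic helper (the helper name and signature are illustrative, not part of the commit):

def prefer_javbus(javbus_json, key, fallback, expected=str):
    # Use the javbus value when it exists and is a non-empty instance of the
    # expected type; otherwise compute the value from the airav HTML instead.
    value = javbus_json.get(key)
    if isinstance(value, expected) and len(value):
        return value
    return fallback()

# e.g. studio = prefer_javbus(javbus_json, 'studio',
#                             lambda: parse_studio_from_airav(htmlcode))
# parse_studio_from_airav stands in for the xpath lookup shown above.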
@@ -169,52 +169,50 @@ def main(number):
     try:
         try:
             htmlcode = get_html('https://cn.airav.wiki/video/' + number)
-            javbus_htmlcode = get_html('https://www.javbus.com/ja/' + number)
+            javbus_json = json.loads(javbus.main(number))
         except:
             print(number)

         dic = {
             # 标题可使用airav
-            'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
-            # 制作商选择使用javbus
-            'studio': getStudio(javbus_htmlcode),
-            # 年份也是用javbus
-            'year': str(re.search('\d{4}', getYear(javbus_htmlcode)).group()),
+            'title': getTitle(htmlcode),
+            # 制作商先找javbus,如果没有再找本站
+            'studio': getStudio(htmlcode, javbus_json),
+            # 年份先试javbus,如果没有再找本站
+            'year': getYear(htmlcode, javbus_json),
             # 简介 使用 airav
             'outline': getOutline(htmlcode),
             # 使用javbus
-            'runtime': getRuntime(javbus_htmlcode),
+            'runtime': getRuntime(javbus_json),
             # 导演 使用javbus
-            'director': getDirector(javbus_htmlcode),
-            # 作者 使用airav
-            'actor': getActor(javbus_htmlcode),
-            # 发售日使用javbus
-            'release': getRelease(javbus_htmlcode),
+            'director': getDirector(javbus_json),
+            # 演员 先试airav
+            'actor': getActor(htmlcode, javbus_json),
+            # 发售日先试javbus
+            'release': getRelease(htmlcode, javbus_json),
             # 番号使用javbus
-            'number': getNum(javbus_htmlcode),
+            'number': getNum(htmlcode, javbus_json),
             # 封面链接 使用javbus
-            'cover': getCover(javbus_htmlcode),
+            'cover': getCover(htmlcode, javbus_json),
             # 剧照获取
             'extrafanart': getExtrafanart(htmlcode),
             'imagecut': 1,
             # 使用 airav
             'tag': getTag(htmlcode),
             # 使用javbus
-            'label': getSerise(javbus_htmlcode),
+            'label': getSerise(javbus_json),
             # 妈的,airav不提供作者图片
-            'actor_photo': getActorPhoto(javbus_htmlcode),
+            # 'actor_photo': getActorPhoto(javbus_json),
             'website': 'https://www.airav.wiki/video/' + number,
             'source': 'airav.py',
             # 使用javbus
-            'series': getSerise(javbus_htmlcode),
+            'series': getSerise(javbus_json)
         }
         js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4,separators=(',', ':'), ) # .encode('UTF-8')
         return js
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         data = {
             "title": "",
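config.getInstance() replaces the repeated config.Config() constructions in every except branch. The config module itself is not part of this diff; a minimal sketch of what such a cached accessor typically looks like, assuming Config() is cheap to build once and read-only afterwards:

# config.py (sketch, not the project's actual implementation)
class Config:
    def debug(self):
        return False  # stand-in for the real ini-backed flag

_instance = None

def getInstance():
    # Build Config once and hand the same object to every caller, instead of
    # re-reading the configuration file on each config.Config() call.
    global _instance
    if _instance is None:
        _instance = Config()
    return _instance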
@@ -226,6 +224,6 @@ def main(number):


 if __name__ == '__main__':
-    #print(main('ADN-188'))
-    print(main('ADN-188'))
-    print(main('CJOD-278'))
+    print(main('ADV-R0624')) # javbus页面返回404, airav有数据
+    print(main('ADN-188')) # 一人
+    print(main('CJOD-278')) # 多人 javbus演员名称采用日语假名,airav采用日文汉字
@@ -100,6 +100,9 @@ def main(number):
     soup = BeautifulSoup(web, 'lxml')
     info = str(soup.find(attrs={'class': 'row movie'}))
     try:
+        new_number = getNum(info)
+        if new_number.upper() != number.upper():
+            raise ValueError('number not found')
         dic = {
             'actor': getActor(web),
             'title': getTitle(web).strip(getNum(web)),
@@ -108,7 +111,7 @@ def main(number):
             'runtime': getRuntime(info),
             'director': '', #
             'release': getRelease(info),
-            'number': getNum(info),
+            'number': new_number,
             'cover': getCover(web),
             'cover_small': getCover_small(a),
             'imagecut': 3,
@@ -121,7 +124,7 @@ def main(number):
             'series': getSeries(info),
         }
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         dic = {"title": ""}
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -129,3 +132,4 @@ def main(number):

 if __name__ == "__main__":
     print(main('012717_472'))
+    print(main('1')) # got fake result raise 'number not found'
@@ -1,51 +1,53 @@
 import sys
 sys.path.append('../')
 import json
-from bs4 import BeautifulSoup
 from lxml import html
 import re
 from ADC_function import *

 def main(number: str) -> json:
     try:
-        caribbytes, browser = get_html_by_browser(
+        carib_obj, browser = get_html_by_browser(
             'https://www.caribbeancom.com/moviepages/'+number+'/index.html',
             return_type="browser")

-        if not caribbytes or not caribbytes.ok:
+        if not carib_obj or not carib_obj.ok:
             raise ValueError("page not found")

         lx = html.fromstring(str(browser.page))

         if not browser.page.select_one("#moviepages > div > div:nth-child(1) > div.movie-info.section"):
             raise ValueError("page info not found")

+        dic = {
+            'title': get_title(lx),
+            'studio': '加勒比',
+            'year': get_year(lx),
+            'outline': get_outline(lx),
+            'runtime': get_runtime(lx),
+            'director': '',
+            'actor': get_actor(lx),
+            'release': get_release(lx),
+            'number': number,
+            'cover': 'https://www.caribbeancom.com/moviepages/' + number + '/images/l_l.jpg',
+            'tag': get_tag(lx),
+            'extrafanart': get_extrafanart(lx),
+            'label': get_series(lx),
+            'imagecut': 1,
+            # 'actor_photo': get_actor_photo(browser),
+            'website': 'https://www.caribbeancom.com/moviepages/' + number + '/index.html',
+            'source': 'carib.py',
+            'series': get_series(lx),
+        }
+        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )
+        return js
+
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         dic = {"title": ""}
         return json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'))
-    dic = {
-        'title': get_title(lx),
-        'studio': '加勒比',
-        'year': get_year(lx),
-        'outline': get_outline(lx),
-        'runtime': get_runtime(lx),
-        'director': '',
-        'actor': get_actor(lx),
-        'release': get_release(lx),
-        'number': number,
-        'cover': 'https://www.caribbeancom.com/moviepages/' + number + '/images/l_l.jpg',
-        'tag': get_tag(lx),
-        'extrafanart': get_extrafanart(lx),
-        'label': get_series(lx),
-        'imagecut': 1,
-        # 'actor_photo': get_actor_photo(browser),
-        'website': 'https://www.caribbeancom.com/moviepages/' + number + '/index.html',
-        'source': 'carib.py',
-        'series': get_series(lx),
-    }
-    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )
-    return js

 def get_title(lx: html.HtmlElement) -> str:
     return str(lx.xpath("//div[@class='movie-info section']/div[@class='heading']/h1[@itemprop='name']/text()")[0]).strip()
@@ -114,11 +116,10 @@ def get_actor_photo(browser):
         if pos<0:
             continue
         css = html[pos:pos+100]
-        p0 = css.find('background: url(')
-        p1 = css.find('.jpg)')
-        if p0<0 or p1<0:
+        cssBGjpgs = re.findall(r'background: url\((.+\.jpg)', css, re.I)
+        if not cssBGjpgs or not len(cssBGjpgs[0]):
             continue
-        p = {k: urljoin(browser.url, css[p0+len('background: url('):p1+len('.jpg')])}
+        p = {k: urljoin(browser.url, cssBGjpgs[0])}
         o.update(p)
     return o
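The two find() calls with manual offset arithmetic are replaced by a single regular expression that pulls the .jpg URL straight out of the inline CSS. A small sketch of what the regex matches (the CSS snippet below is made up for illustration):

import re
from urllib.parse import urljoin

css = '.itemimg { background: url(/moviepages/012717_472/images/n.jpg) no-repeat; }'
jpgs = re.findall(r'background: url\((.+\.jpg)', css, re.I)
# jpgs == ['/moviepages/012717_472/images/n.jpg']
if jpgs:
    photo = urljoin('https://www.caribbeancom.com/', jpgs[0])
    # photo == 'https://www.caribbeancom.com/moviepages/012717_472/images/n.jpg'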
@@ -153,7 +153,7 @@ def main(number):
         js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
         return js
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         data = {
             "title": "",
@@ -93,6 +93,7 @@ def main(number):
             actor = '素人'
         lx = etree.fromstring(htmlcode2, etree.HTMLParser())
         cover = str(lx.xpath("//div[@class='items_article_MainitemThumb']/span/img/@src")).strip(" ['']")
+        cover = ADC_function.urljoin('https://adult.contents.fc2.com', cover)
         dic = {
             'title': lx.xpath('/html/head/title/text()')[0],
             'studio': getStudio_fc2com(htmlcode2),
@@ -116,7 +117,7 @@ def main(number):
             'series': '',
         }
     except Exception as e:
-        if ADC_function.config.Config().debug():
+        if ADC_function.config.getInstance().debug():
             print(e)
         dic = {"title": ""}
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -124,4 +125,5 @@ def main(number):

 if __name__ == '__main__':
     print(main('FC2-1787685'))
+    print(main('FC2-2086710'))

@@ -103,7 +103,7 @@ def main(number):
             'series': '',
         }
     except Exception as e:
-        if ADC_function.config.Config().debug():
+        if ADC_function.config.getInstance().debug():
             print(e)
         dic = {"title": ""}
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -6,8 +6,7 @@ from lxml import etree#need install
 from bs4 import BeautifulSoup#need install
 import json
 from ADC_function import *
-from WebCrawler import fanza
-from WebCrawler import airav
+import inspect

 def getActorPhoto(htmlcode): #//*[@id="star_qdt"]/li/a/img
     soup = BeautifulSoup(htmlcode, 'lxml')
@@ -82,12 +81,16 @@ def getCID(htmlcode):
     result = re.sub('/.*?.jpg','',string)
     return result
 def getOutline(number): #获取剧情介绍
+    if any(caller for caller in inspect.stack() if os.path.basename(caller.filename) == 'airav.py'):
+        return '' # 从airav.py过来的调用不计算outline直接返回,避免重复抓取数据拖慢处理速度
     try:
-        response = json.loads(airav.main(number))
-        result = response['outline']
+        htmlcode = get_html('https://cn.airav.wiki/video/' + number)
+        from WebCrawler.airav import getOutline as airav_getOutline
+        result = airav_getOutline(htmlcode)
         return result
     except:
-        return ''
+        pass
+    return ''
 def getSerise(htmlcode): #获取系列 已修改
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     # 如果记录中冇导演,系列排在第6位
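getOutline() in javbus.py gets its synopsis from airav, while airav.py now calls javbus.main() for most other fields; without a guard the two modules would keep scraping each other's pages back and forth. The inspect.stack() test above breaks that cycle by returning early whenever the caller is airav.py. A hedged sketch of the same idea in isolation (the function names here are illustrative):

import inspect
import os

def called_from(module_filename):
    # True if any frame on the current call stack comes from the given file.
    return any(os.path.basename(frame.filename) == module_filename
               for frame in inspect.stack())

def get_outline(number):
    if called_from('airav.py'):
        return ''  # airav.py already has the synopsis; skip the redundant fetch
    # ... fall through to the real scraping logic ...
    return ''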
@@ -117,13 +120,15 @@ def getExtrafanart(htmlcode): # 获取剧照
     extrafanart_pather = re.compile(r'<a class=\"sample-box\" href=\"(.*?)\"')
     extrafanart_imgs = extrafanart_pather.findall(html)
     if extrafanart_imgs:
-        return extrafanart_imgs
+        return [urljoin('https://www.javbus.com',img) for img in extrafanart_imgs]
     return ''

 def main_uncensored(number):
     htmlcode = get_html('https://www.javbus.com/ja/' + number)
     if getTitle(htmlcode) == '':
         htmlcode = get_html('https://www.javbus.com/ja/' + number.replace('-','_'))
+    if "<title>404 Page Not Found" in htmlcode:
+        raise Exception('404 page not found')
     dic = {
         'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))).replace(getNum(htmlcode)+'-',''),
         'studio': getStudio(htmlcode),
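The sample-box hrefs on javbus can be relative paths, so the raw findall() result is now mapped through urljoin() before being returned. A short sketch of the effect (the sample paths are invented):

from urllib.parse import urljoin

extrafanart_imgs = ['/pics/sample/abc_1.jpg',
                    'https://pics.dmm.co.jp/digital/video/abc/abc-2.jpg']
fixed = [urljoin('https://www.javbus.com', img) for img in extrafanart_imgs]
# fixed == ['https://www.javbus.com/pics/sample/abc_1.jpg',
#           'https://pics.dmm.co.jp/digital/video/abc/abc-2.jpg']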
@@ -155,6 +160,8 @@ def main(number):
             htmlcode = get_html('https://www.fanbus.us/' + number)
         except:
             htmlcode = get_html('https://www.javbus.com/' + number)
+        if "<title>404 Page Not Found" in htmlcode:
+            raise Exception('404 page not found')
         dic = {
             'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
             'studio': getStudio(htmlcode),
@@ -180,7 +187,7 @@ def main(number):
         except:
             return main_uncensored(number)
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         data = {
             "title": "",
@@ -191,5 +198,7 @@ def main(number):
     return js

 if __name__ == "__main__" :
+    print(main('ADV-R0624')) # 404
     print(main('ipx-292'))
     print(main('CEMD-011'))
+    print(main('CJOD-278'))
@@ -5,7 +5,7 @@ from lxml import etree
 import json
 from bs4 import BeautifulSoup
 from ADC_function import *
-from WebCrawler import airav
+import secrets
 # import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
@@ -21,7 +21,7 @@ def getActor(a):
     genders = html.xpath('//span[@class="value"]/a[contains(@href,"/actors/")]/../strong/@class')
     r = []
     idx = 0
-    actor_gendor = config.Config().actor_gender()
+    actor_gendor = config.getInstance().actor_gender()
     if not actor_gendor in ['female','male','both','all']:
         actor_gendor = 'female'
     for act in actors:
@@ -67,9 +67,15 @@ def getStudio(a):
     patherr = re.compile(r'<strong>片商\:</strong>[\s\S]*?<a href=\".*?>(.*?)</a></span>')
     pianshang = patherr.findall(a)
     if pianshang:
-        result = pianshang[0]
-    else:
-        result = ""
+        result = pianshang[0].strip()
+        if len(result):
+            return result
+    # 以卖家作为工作室
+    html = etree.fromstring(a, etree.HTMLParser())
+    try:
+        result = str(html.xpath('//strong[contains(text(),"賣家:")]/../span/a/text()')).strip(" ['']")
+    except:
+        result = ''
     return result

 def getRuntime(a):
@@ -171,16 +177,13 @@ def getTrailer(htmlcode): # 获取预告片
     return video_url

 def getExtrafanart(htmlcode): # 获取剧照
-    html_pather = re.compile(r'<div class=\"tile\-images preview\-images\">[\s\S]*?</a>\s+?</div>\s+?</div>')
-    html = html_pather.search(htmlcode)
-    if html:
-        html = html.group()
-        extrafanart_pather = re.compile(r'<a class="tile-item" href=\"(.*?)\"')
-        extrafanart_imgs = extrafanart_pather.findall(html)
-        if extrafanart_imgs:
-            return extrafanart_imgs
-    return ''
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    result = []
+    try:
+        result = html.xpath("//article[@class='message video-panel']/div[@class='message-body']/div[@class='tile-images preview-images']/a[contains(@href,'/samples/')]/@href")
+    except:
+        pass
+    return result

 def getCover(htmlcode):
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     try:
@@ -195,11 +198,13 @@ def getDirector(a):
     return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
 def getOutline(number): #获取剧情介绍
     try:
-        response = json.loads(airav.main(number))
-        result = response['outline']
+        htmlcode = get_html('https://cn.airav.wiki/video/' + number)
+        from WebCrawler.airav import getOutline as airav_getOutline
+        result = airav_getOutline(htmlcode)
         return result
     except:
-        return ''
+        pass
+    return ''
 def getSeries(a):
     #/html/body/section/div/div[3]/div[2]/nav/div[7]/span/a
     html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
@@ -208,7 +213,7 @@ def getSeries(a):
     return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')

 def main(number):
-    javdb_site = random.choice(["javdb9", "javdb30"])
+    javdb_site = secrets.choice(["javdb9", "javdb30"])
     try:
         # if re.search(r'[a-zA-Z]+\.\d{2}\.\d{2}\.\d{2}', number).group():
         #     pass
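random.choice is swapped for secrets.choice when picking a javdb mirror. Both return one element of the list; secrets draws from the OS-level CSPRNG and is unaffected by any random.seed() call elsewhere in the process. A one-line sketch:

import secrets

javdb_site = secrets.choice(["javdb9", "javdb30"])  # yields 'javdb9' or 'javdb30'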
@@ -303,8 +308,16 @@ f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not b
             'series': getSeries(detail_page),

         }
+        if not dic['actor'] and re.match(r'FC2-[\d]+', number, re.A):
+            dic['actor'].append('素人')
+        if not dic['series']:
+            dic['series'] = dic['studio']
+        if not dic['label']:
+            dic['label'] = dic['studio']
+
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         dic = {"title": ""}
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -316,7 +329,9 @@ if __name__ == "__main__":
     # print(main('blacked.20.05.30'))
     # print(main('AGAV-042'))
     # print(main('BANK-022'))
-    print(main('FC2-735670'))
-    print(main('FC2-1174949')) # not found
+    print(main('093021_539')) # 没有剧照 片商pacopacomama
+    # print(main('FC2-2278260'))
+    # print(main('FC2-735670'))
+    # print(main('FC2-1174949')) # not found
     print(main('MVSD-439'))
-    print(main('EHM0001')) # not found
+    # print(main('EHM0001')) # not found
@@ -137,7 +137,7 @@ def main(number2):
             'series': getSeries(a),
         }
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         dic = {"title": ""}

@@ -224,7 +224,7 @@ def main(number):
             'series': getSeries(detail_page),
         }
     except Exception as e:
-        if config.Config().debug():
+        if config.getInstance().debug():
             print(e)
         dic = {"title": ""}
