Object-oriented crawler refactoring #2
fanza.py
@@ -9,130 +9,33 @@ from urllib.parse import urlencode
 from lxml import etree
 
 from ADC_function import *
+from crawler import *
 # import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
 
 
+class fanzaCrawler(Crawler):
+    def getFanzaString(self, string):
+        result1 = str(self.html.xpath("//td[contains(text(),'" + string + "')]/following-sibling::td/a/text()")).strip(" ['']")
+        result2 = str(self.html.xpath("//td[contains(text(),'" + string + "')]/following-sibling::td/text()")).strip(" ['']")
+        return result1 + result2
+
+    def getFanzaStrings(self, string):
+        result1 = self.html.xpath("//td[contains(text(),'" + string + "')]/following-sibling::td/a/text()")
+        if len(result1) > 0:
+            return result1
+        result2 = self.html.xpath("//td[contains(text(),'" + string + "')]/following-sibling::td/text()")
+        return result2
-def getTitle(text):
-    html = etree.fromstring(text, etree.HTMLParser())
-    result = html.xpath('//*[starts-with(@id, "title")]/text()')[0]
-    return result
 
 
-def getActor(text):
-    # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
-    html = etree.fromstring(text, etree.HTMLParser())
-    result = (
-        str(
-            html.xpath(
-                "//td[contains(text(),'出演者')]/following-sibling::td/span/a/text()"
-            )
-        )
-        .strip(" ['']")
-        .replace("', '", ",")
-    )
-    return result
+def getRelease(fanza_Crawler):
+    result = fanza_Crawler.getFanzaString('発売日:')
+    if result == '----':
+        result = fanza_Crawler.getFanzaString('配信開始日:')
+    return result.replace("/", "-").strip('\\n')
 
 
-def getStudio(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'メーカー')]/following-sibling::td/a/text()"
-        )[0]
-    except:
-        result = html.xpath(
-            "//td[contains(text(),'メーカー')]/following-sibling::td/text()"
-        )[0]
-    return result
-
-
-def getRuntime(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result = html.xpath("//td[contains(text(),'収録時間')]/following-sibling::td/text()")[0]
-    return re.search(r"\d+", str(result)).group()
-
-
-def getLabel(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'レーベル:')]/following-sibling::td/a/text()"
-        )[0]
-    except:
-        result = html.xpath(
-            "//td[contains(text(),'レーベル:')]/following-sibling::td/text()"
-        )[0]
-    return result
-
-
-def getNum(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'品番:')]/following-sibling::td/a/text()"
-        )[0]
-    except:
-        result = html.xpath(
-            "//td[contains(text(),'品番:')]/following-sibling::td/text()"
-        )[0]
-    return result
-
-
-def getYear(getRelease):
-    try:
-        result = str(re.search(r"\d{4}", getRelease).group())
-        return result
-    except:
-        return getRelease
-
-
-def getRelease(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'発売日:')]/following-sibling::td/a/text()"
-        )[0].lstrip("\n")
-    except:
-        try:
-            result = html.xpath(
-                "//td[contains(text(),'発売日:')]/following-sibling::td/text()"
-            )[0].lstrip("\n")
-        except:
-            result = "----"
-    if result == "----":
-        try:
-            result = html.xpath(
-                "//td[contains(text(),'配信開始日:')]/following-sibling::td/a/text()"
-            )[0].lstrip("\n")
-        except:
-            try:
-                result = html.xpath(
-                    "//td[contains(text(),'配信開始日:')]/following-sibling::td/text()"
-                )[0].lstrip("\n")
-            except:
-                pass
-    return result.replace("/", "-")
-
-
-def getTag(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'ジャンル:')]/following-sibling::td/a/text()"
-        )
-        return result
-    except:
-        result = html.xpath(
-            "//td[contains(text(),'ジャンル:')]/following-sibling::td/text()"
-        )
-        return result
-
-
-def getCover(text, number):
-    html = etree.fromstring(text, etree.HTMLParser())
+def getCover(html, number):
     cover_number = number
     try:
         result = html.xpath('//*[@id="' + cover_number + '"]/@href')[0]
@@ -151,29 +54,11 @@ def getCover(text, number):
     return result
 
 
-def getDirector(text):
-    html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    try:
-        result = html.xpath(
-            "//td[contains(text(),'監督:')]/following-sibling::td/a/text()"
-        )[0]
-    except:
-        result = html.xpath(
-            "//td[contains(text(),'監督:')]/following-sibling::td/text()"
-        )[0]
-    return result
-
-
-def getOutline(text):
-    html = etree.fromstring(text, etree.HTMLParser())
+def getOutline(html):
     try:
-        result = str(html.xpath("//div[@class='mg-b20 lh4']/text()")[0]).replace(
-            "\n", ""
-        )
+        result = str(html.xpath("//div[@class='mg-b20 lh4']/text()")[0]).replace("\n", "")
         if result == "":
-            result = str(html.xpath("//div[@class='mg-b20 lh4']//p/text()")[0]).replace(
-                "\n", ""
-            )
+            result = str(html.xpath("//div[@class='mg-b20 lh4']//p/text()")[0]).replace("\n", "")
     except:
         # (TODO) handle more edge case
         # print(html)
@@ -181,21 +66,6 @@ def getOutline(text):
     return result
 
 
-def getSeries(text):
-    try:
-        html = etree.fromstring(text, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-        try:
-            result = html.xpath(
-                "//td[contains(text(),'シリーズ:')]/following-sibling::td/a/text()"
-            )[0]
-        except:
-            result = html.xpath(
-                "//td[contains(text(),'シリーズ:')]/following-sibling::td/text()"
-            )[0]
-        return result
-    except:
-        return ""
-
-
 def getExtrafanart(htmlcode):  # get extra fanart (stills)
     html_pather = re.compile(r'<div id=\"sample-image-block\"[\s\S]*?<br></div></div>')
     html = html_pather.search(htmlcode)
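
Note: crawler.py itself is not touched by this commit, so the base-class interface is only implied by the call sites above (self.html, getString, getStrings). A minimal sketch of what fanzaCrawler and the other subclasses appear to assume:

# Hypothetical sketch of the Crawler base class. crawler.py is not shown in
# this commit; the names below are inferred from the call sites in this diff,
# not taken from the real file.
from lxml import etree

class Crawler:
    def __init__(self, htmlcode):
        # Parse the page once; subclasses query self.html repeatedly.
        self.html = etree.fromstring(htmlcode, etree.HTMLParser())

    def getStrings(self, xpath):
        # Return the raw list of xpath matches.
        return self.html.xpath(xpath)

    def getString(self, xpath):
        # Flatten the match list into one string, mirroring the old
        # str(...).strip(" ['']") idiom used throughout these scrapers.
        return str(self.html.xpath(xpath)).strip(" ['']")
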
@@ -232,6 +102,7 @@ def main(number):
         "https://www.dmm.co.jp/rental/-/detail/=/cid=",
     ]
     chosen_url = ""
+    fanza_Crawler = ''
 
     for url in fanza_urls:
         chosen_url = url + fanza_search_number
@@ -240,6 +111,7 @@ def main(number):
                 urlencode({"rurl": chosen_url})
             )
         )
+        fanza_Crawler = fanzaCrawler(htmlcode)
         if "404 Not Found" not in htmlcode:
             break
     if "404 Not Found" in htmlcode:
@@ -249,35 +121,34 @@ def main(number):
         # for example, the url will be cid=test012
         # but the hinban on the page is test00012
         # so get the hinban first, and then pass it to following functions
-        fanza_hinban = getNum(htmlcode)
+        fanza_hinban = fanza_Crawler.getFanzaString('品番:')
         out_num = fanza_hinban
         number_lo = number.lower()
+        html = etree.fromstring(htmlcode, etree.HTMLParser())
         if (re.sub('-|_', '', number_lo) == fanza_hinban or
                 number_lo.replace('-', '00') == fanza_hinban or
                 number_lo.replace('-', '') + 'so' == fanza_hinban
         ):
             out_num = number
         data = {
-            "title": getTitle(htmlcode).strip(),
-            "studio": getStudio(htmlcode),
-            "outline": getOutline(htmlcode),
-            "runtime": getRuntime(htmlcode),
-            "director": getDirector(htmlcode) if "anime" not in chosen_url else "",
-            "actor": getActor(htmlcode) if "anime" not in chosen_url else "",
-            "release": getRelease(htmlcode),
+            "title": fanza_Crawler.getString('//*[starts-with(@id, "title")]/text()').strip(),
+            "studio": fanza_Crawler.getFanzaString('メーカー'),
+            "outline": getOutline(html),
+            "runtime": str(re.search(r'\d+', fanza_Crawler.getString("//td[contains(text(),'収録時間')]/following-sibling::td/text()")).group()).strip(" ['']"),
+            "director": fanza_Crawler.getFanzaString('監督:') if "anime" not in chosen_url else "",
+            "actor": fanza_Crawler.getString("//td[contains(text(),'出演者')]/following-sibling::td/span/a/text()").replace("', '", ",") if "anime" not in chosen_url else "",
+            "release": getRelease(fanza_Crawler),
             "number": out_num,
-            "cover": getCover(htmlcode, fanza_hinban),
+            "cover": getCover(html, fanza_hinban),
             "imagecut": 1,
-            "tag": getTag(htmlcode),
+            "tag": fanza_Crawler.getFanzaStrings('ジャンル:'),
             "extrafanart": getExtrafanart(htmlcode),
-            "label": getLabel(htmlcode),
+            "label": fanza_Crawler.getFanzaString('レーベル'),
-            "year": getYear(
-                getRelease(htmlcode)
-            ),  # str(re.search('\d{4}',getRelease(a)).group()),
+            "year": re.findall('\d{4}', getRelease(fanza_Crawler))[0],  # str(re.search('\d{4}',getRelease(a)).group()),
             "actor_photo": "",
            "website": chosen_url,
            "source": "fanza.py",
-            "series": getSeries(htmlcode),
+            "series": fanza_Crawler.getFanzaString('シリーズ:'),
         }
     except:
         data = {
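
The hinban check in main() exists because the cid in the URL and the 品番 printed on the page can differ in zero-padding. A quick worked example of the three accepted forms (values are illustrative):

# Worked example of the hinban normalization above.
import re

number_lo = 'test-012'.lower()
assert number_lo.replace('-', '00') == 'test00012'               # zero-padded page hinban
assert re.sub('-|_', '', 'abp-123'.lower()) == 'abp123'          # plain concatenation
assert 'abp-123'.lower().replace('-', '') + 'so' == 'abp123so'   # the 'so' suffix form
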
fc2.py
@@ -4,58 +4,11 @@ import re
 from lxml import etree  # need install
 import json
 import ADC_function
+from crawler import *
 # import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
 
-def getTitle_fc2com(htmlcode):  # get maker/studio
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/h3/text()')[0]
-    return result
-def getActor_fc2com(htmlcode):
-    try:
-        html = etree.fromstring(htmlcode, etree.HTMLParser())
-        result = html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()')[0]
-        return result
-    except:
-        return ''
-def getStudio_fc2com(htmlcode):  # get maker/studio
-    try:
-        html = etree.fromstring(htmlcode, etree.HTMLParser())
-        result = str(html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()')).strip(" ['']")
-        return result
-    except:
-        return ''
-def getNum_fc2com(htmlcode):  # get product number
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')).strip(" ['']")
-    return result
-def getRelease_fc2com(htmlcode2):
-    html = etree.fromstring(htmlcode2, etree.HTMLParser())
-    result = str(html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/div[2]/p/text()')).strip(" ['販売日 : ']").replace('/', '-')
-    return result
-def getCover_fc2com(htmlcode2):  # get maker/studio
-    html = etree.fromstring(htmlcode2, etree.HTMLParser())
-    result = str(html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[1]/span/img/@src')).strip(" ['']")
-    return 'http:' + result
-# def getOutline_fc2com(htmlcode2):  # get product number
-#     xpath_html = etree.fromstring(htmlcode2, etree.HTMLParser())
-#     path = str(xpath_html.xpath('//*[@id="top"]/div[1]/section[4]/iframe/@src')).strip(" ['']")
-#     html = etree.fromstring(ADC_function.get_html('https://adult.contents.fc2.com/'+path), etree.HTMLParser())
-#     print('https://adult.contents.fc2.com'+path)
-#     print(ADC_function.get_html('https://adult.contents.fc2.com'+path,cookies={'wei6H':'1'}))
-#     result = str(html.xpath('/html/body/div/text()')).strip(" ['']").replace("\\n",'',10000).replace("'",'',10000).replace(', ,','').strip(' ').replace('。,',',')
-#     return result
-def getTag_fc2com(lx):
-    result = lx.xpath("//a[@class='tag tagTag']/text()")
-    return result
-def getYear_fc2com(release):
-    try:
-        result = re.search('\d{4}', release).group()
-        return result
-    except:
-        return ''
 
 def getExtrafanart(htmlcode):  # get extra fanart (stills)
     html_pather = re.compile(r'<ul class=\"items_article_SampleImagesArea\"[\s\S]*?</ul>')
     html = html_pather.search(htmlcode)
@@ -79,27 +32,30 @@ def getTrailer(htmlcode, number):
         except:
             return ''
     else:
-        video_url = ''
+        return ''
 
 def main(number):
     try:
         number = number.replace('FC2-', '').replace('fc2-', '')
         htmlcode2 = ADC_function.get_html('https://adult.contents.fc2.com/article/' + number + '/', encoding='utf-8')
-        actor = getActor_fc2com(htmlcode2)
-        if not actor:
+        fc2_crawler = Crawler(htmlcode2)
+        actor = fc2_crawler.getString('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()')
+        if actor == "":
             actor = '素人'
         lx = etree.fromstring(htmlcode2, etree.HTMLParser())
-        cover = str(lx.xpath("//div[@class='items_article_MainitemThumb']/span/img/@src")).strip(" ['']")
+        cover = fc2_crawler.getString("//div[@class='items_article_MainitemThumb']/span/img/@src")
         cover = ADC_function.urljoin('https://adult.contents.fc2.com', cover)
+        release = fc2_crawler.getString('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/div[2]/p/text()').\
+            strip(" ['販売日 : ']").replace('/', '-')
         dic = {
-            'title': lx.xpath('/html/head/title/text()')[0],
-            'studio': getStudio_fc2com(htmlcode2),
-            'year': getYear_fc2com(getRelease_fc2com(htmlcode2)),
+            'title': fc2_crawler.getString('/html/head/title/text()'),
+            'studio': fc2_crawler.getString('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()'),
+            'year': re.findall('\d{4}', release)[0],
             'outline': '',  # getOutline_fc2com(htmlcode2),
             'runtime': str(lx.xpath("//p[@class='items_article_info']/text()")[0]),
-            'director': getStudio_fc2com(htmlcode2),
+            'director': fc2_crawler.getString('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()'),
             'actor': actor,
-            'release': getRelease_fc2com(htmlcode2),
+            'release': release,
             'number': 'FC2-' + number,
             'label': '',
             'cover': cover,
@@ -107,7 +63,7 @@ def main(number):
             'extrafanart': getExtrafanart(htmlcode2),
             "trailer": getTrailer(htmlcode2, number),
             'imagecut': 0,
-            'tag': getTag_fc2com(lx),
+            'tag': fc2_crawler.getStrings("//a[@class='tag tagTag']/text()"),
             'actor_photo': '',
             'website': 'https://adult.contents.fc2.com/article/' + number + '/',
             'source': 'https://adult.contents.fc2.com/article/' + number + '/',
@@ -121,7 +77,4 @@ def main(number):
     return js
 
 if __name__ == '__main__':
-    print(main('FC2-1787685'))
-    print(main('FC2-2086710'))
-    print(main('FC2-2182382'))
-
+    print(main('FC2-2182382'))
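
The fc2 changes are mostly mechanical: each str(html.xpath(...)).strip(" ['']") helper collapses into one getString call on a single shared parser. Assuming the Crawler sketch above, and htmlcode2 as fetched in main(), the cover lookup before and after:

# Before: a fresh parse plus the stringify/strip idiom at every call site.
from lxml import etree
lx = etree.fromstring(htmlcode2, etree.HTMLParser())
cover = str(lx.xpath("//div[@class='items_article_MainitemThumb']/span/img/@src")).strip(" ['']")

# After: parse once, then query by xpath.
fc2_crawler = Crawler(htmlcode2)
cover = fc2_crawler.getString("//div[@class='items_article_MainitemThumb']/span/img/@src")
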
mgstage.py
@@ -5,95 +5,28 @@ from lxml import etree
 import json
 from bs4 import BeautifulSoup
 from ADC_function import *
+from crawler import *
 # import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
 
-def getTitle(a):
-    try:
-        html = etree.fromstring(a, etree.HTMLParser())
-        result = str(html.xpath('//*[@id="center_column"]/div[1]/h1/text()')).strip(" ['']")
-        return result.replace('/', ',')
-    except:
-        return ''
-def getActor(a):  # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"出演:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"出演:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '').replace('/', ',')
-def getStudio(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"メーカー:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"メーカー:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
-def getRuntime(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"収録時間:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"収録時間:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').rstrip('mi')
-def getLabel(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
-def getNum(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"品番:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"品番:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+')
-def getYear(getRelease):
-    try:
-        result = str(re.search('\d{4}', getRelease).group())
-        return result
-    except:
-        return getRelease
-def getRelease(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"配信開始日:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"配信開始日:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace('/', '-')
+class MgsCrawler(Crawler):
+    def getMgsString(self, _xpath):
+        html = self.html
+        result1 = str(html.xpath(_xpath)).strip(" ['']").strip('\\n ').strip('\\n').strip(" ['']").replace(u'\\n', '').replace("', '', '", '')
+        result2 = str(html.xpath(_xpath.replace('td/a/', 'td/'))).strip(" ['']").strip('\\n ').strip('\\n')
+        return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
 def getTag(a):
     html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip(
-        '\\n')
-    result2 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/text()')).strip(" ['']").strip('\\n ').strip(
-        '\\n')
+    result1 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
+    result2 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
     result = str(result1 + result2).strip('+').replace("', '\\n", ",").replace("', '", "").replace('"', '').replace(',,', '').split(',')
     return result
-def getCover(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('//*[@id="EnlargeImage"]/@href')).strip(" ['']")
-    # result = str(html.xpath('//*[@id="center_column"]/div[1]/div[1]/div/div/h2/img/@src')).strip(" ['']")
-    # /html/body/div[2]/article[2]/div[1]/div[1]/div/div/h2/img/@src
-    return result
-def getDirector(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
-def getOutline(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = str(html.xpath('//p/text()')).strip(" ['']").replace(u'\\n', '').replace("', '', '", '')
-    return result
-def getSeries(a):
-    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
-    result1 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    result2 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
-    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
 
-def getExtrafanart(htmlcode):  # get extra fanart (stills)
+def getExtrafanart(htmlcode2):  # get extra fanart (stills)
     html_pather = re.compile(r'<dd>\s*?<ul>[\s\S]*?</ul>\s*?</dd>')
-    html = html_pather.search(htmlcode)
+    html = html_pather.search(htmlcode2)
     if html:
         html = html.group()
         extrafanart_pather = re.compile(r'<a class=\"sample_image\" href=\"(.*?)\"')
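
getMgsString folds the old two-query pattern into one call: it reads the linked value (td/a/text()) and the plain value (td/text()) of the same table row, deriving the fallback xpath by a plain string substitution:

# The fallback xpath is derived from the primary one:
_xpath = '//th[contains(text(),"品番:")]/../td/a/text()'
fallback = _xpath.replace('td/a/', 'td/')
print(fallback)  # //th[contains(text(),"品番:")]/../td/text()
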
@@ -104,36 +37,35 @@ def getExtrafanart(htmlcode):  # get extra fanart (stills)
 
 def main(number2):
     number = number2.upper()
-    htmlcode = str(get_html('https://www.mgstage.com/product/product_detail/' + str(number) + '/', cookies={'adc': '1'}))
-    soup = BeautifulSoup(htmlcode, 'lxml')
-    a = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ', '').replace(' ', '').replace('\n ', '').replace('\n ', '')
-    b = str(soup.find(attrs={'id': 'introduction'})).replace('\n ', '').replace(' ', '').replace('\n ', '').replace('\n ', '')
+    htmlcode2 = str(get_html('https://www.mgstage.com/product/product_detail/' + str(number) + '/', cookies={'adc': '1'}))
+    soup = BeautifulSoup(htmlcode2, 'lxml')
+    a2 = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ', '').replace(' ', '').replace('\n ', '').replace('\n ', '')
+    b2 = str(soup.find(attrs={'id': 'introduction'})).replace('\n ', '').replace(' ', '').replace('\n ', '').replace('\n ', '')
+    htmlcode = MgsCrawler(htmlcode2)
+    a = MgsCrawler(a2)
+    b = MgsCrawler(b2)
     #print(b)
-    try:
-        dic = {
-            'title': getTitle(htmlcode).replace("\\n", '').replace(' ', ''),
-            'studio': getStudio(a),
-            'outline': getOutline(b),
-            'runtime': getRuntime(a),
-            'director': getDirector(a),
-            'actor': getActor(a),
-            'release': getRelease(a),
-            'number': getNum(a),
-            'cover': getCover(htmlcode),
-            'imagecut': 1,
-            'tag': getTag(a),
-            'label': getLabel(a),
-            'extrafanart': getExtrafanart(htmlcode),
-            'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group()),
+    dic = {
+        'title': htmlcode.getString('//*[@id="center_column"]/div[1]/h1/text()').replace('/', ',').replace("\\n", '').replace(' ', '').strip(),
+        'studio': a.getMgsString('//th[contains(text(),"メーカー:")]/../td/a/text()'),
+        'outline': b.getString('//p/text()').strip(" ['']").replace(u'\\n', '').replace("', '', '", ''),
+        'runtime': a.getMgsString('//th[contains(text(),"収録時間:")]/../td/a/text()').rstrip('mi'),
+        'director': a.getMgsString('//th[contains(text(),"シリーズ")]/../td/a/text()'),
+        'actor': a.getMgsString('//th[contains(text(),"出演:")]/../td/a/text()'),
+        'release': a.getMgsString('//th[contains(text(),"配信開始日:")]/../td/a/text()').replace('/', '-'),
+        'number': a.getMgsString('//th[contains(text(),"品番:")]/../td/a/text()'),
+        'cover': htmlcode.getString('//*[@id="EnlargeImage"]/@href'),
+        'imagecut': 1,
+        'tag': getTag(a2),
+        'label': a.getMgsString('//th[contains(text(),"シリーズ:")]/../td/a/text()'),
+        'extrafanart': getExtrafanart(htmlcode2),
+        'year': str(re.findall('\d{4}', a.getMgsString('//th[contains(text(),"配信開始日:")]/../td/a/text()'))).strip(" ['']"),
+        # str(re.search('\d{4}',getRelease(a)).group()),
         'actor_photo': '',
         'website': 'https://www.mgstage.com/product/product_detail/' + str(number) + '/',
         'source': 'mgstage.py',
-            'series': getSeries(a),
+        'series': a.getMgsString('//th[contains(text(),"シリーズ")]/../td/a/text()'),
     }
-    except Exception as e:
-        if config.getInstance().debug():
-            print(e)
-        dic = {"title": ""}
 
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
     return js
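
The new 'year' field stringifies re.findall's match list and strips the brackets, rather than calling the removed getYear helper. A quick check of that round-trip, assuming the release string has the usual shape of the 配信開始日 value:

import re

release = '2021-03-15'  # assumed shape of the release date
year = str(re.findall(r'\d{4}', release)).strip(" ['']")
print(year)  # 2021
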