Update 3.7-5 DEBUG ONLY

This commit is contained in:
root
2020-08-14 17:00:31 +08:00
parent c5a68715ea
commit e687035722
14 changed files with 122 additions and 64 deletions

0
WebCrawler/__init__.py Normal file
View File

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import re
from lxml import etree
import json

View File

@@ -2,6 +2,8 @@ import re
from lxml import etree
import json
from bs4 import BeautifulSoup
import sys
sys.path.append('../')
from ADC_function import *
# import sys
# import io
@@ -24,7 +26,10 @@ def getTitle(a):
return result
def getActor(a):
    """Extract the voice-actor names from a DLsite product page.

    a: raw HTML text of the product page (fetched with the zh-cn locale cookie).
    Returns a list of actor-name strings, or '' when parsing fails.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    try:
        # Row header is "声优" (voice actor) on zh-cn locale pages.
        # NOTE: a stale duplicate query against the Japanese header "声優"
        # was removed here — it ran unconditionally and was immediately
        # overwritten by this assignment.
        result1 = html.xpath('//th[contains(text(),"声优")]/../td/a/text()')
    except:
        # Best-effort scraper convention in this file: fall back to '' on
        # any parse failure rather than propagating.
        result1 = ''
    return result1
def getActorPhoto(actor): #//*[@id="star_qdt"]/li/a/img
a = actor.split(',')
@@ -35,7 +40,13 @@ def getActorPhoto(actor): #//*[@id="star_qdt"]/li/a/img
return d
def getStudio(a):
    """Extract the studio/circle name from a DLsite product page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns the first matching name string, or '' when neither row exists.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    # NOTE: a stale pre-edit lookup against the Japanese header "ブランド名"
    # with an unguarded [0] index was removed here — it executed before the
    # try block and raised an uncaught IndexError whenever that row was
    # absent from the zh-cn page.
    try:
        try:
            # "系列名" (series name) row, preferred when present.
            result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
        except:
            # Fall back to the "社团名" (circle name) row.
            result = html.xpath('//th[contains(text(),"社团名")]/../td/span[1]/a/text()')[0]
    except:
        # Neither row found: best-effort convention returns ''.
        result = ''
    return result
def getRuntime(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
@@ -44,7 +55,13 @@ def getRuntime(a):
return str(result1 + result2).strip('+').rstrip('mi')
def getLabel(a):
    """Extract the label (same series/circle rows as getStudio) from a DLsite page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns the first matching name string, or '' when neither row exists.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    # NOTE: a stale pre-edit lookup against the Japanese header "ブランド名"
    # with an unguarded [0] index was removed here — it ran before the try
    # block and could raise an uncaught IndexError on zh-cn pages.
    try:
        try:
            # "系列名" (series name) row, preferred when present.
            result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
        except:
            # Fall back to the "社团名" (circle name) row.
            result = html.xpath('//th[contains(text(),"社团名")]/../td/span[1]/a/text()')[0]
    except:
        # Neither row found: best-effort convention returns ''.
        result = ''
    return result
def getYear(getRelease):
try:
@@ -54,12 +71,12 @@ def getYear(getRelease):
return getRelease
def getRelease(a):
    """Extract the release date from a DLsite product page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns the date string from the "贩卖" (on-sale) row, with separators
    rewritten to '-'. Raises IndexError if the row is missing (callers wrap
    main() in a try, so this propagates into the generic failure path).
    """
    html = etree.fromstring(a, etree.HTMLParser())
    # NOTE: a stale pre-edit lookup against the Japanese header "販売" with
    # an unguarded [0] index was removed here — it ran first and raised an
    # uncaught IndexError whenever the zh-cn page lacked that row.
    result1 = html.xpath('//th[contains(text(),"贩卖")]/../td/a/text()')[0]
    # NOTE(review): the replace() targets below render as empty strings —
    # they were almost certainly the CJK date markers 年/月/日 lost to an
    # encoding problem. As written, replace('','-') inserts '-' between
    # every character. Kept byte-identical pending confirmation against the
    # repository's canonical copy — TODO confirm.
    return result1.replace('','-').replace('','-').replace('','')
def getTag(a):
    """Extract the genre/category tags from a DLsite product page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns a list of tag strings, or '' when parsing fails.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    try:
        # "分类" (category) row on zh-cn locale pages.
        # NOTE: a stale duplicate query against the Japanese header
        # "ジャンル" was removed here — its result was immediately
        # overwritten by this assignment.
        result = html.xpath('//th[contains(text(),"分类")]/../td/div/a/text()')
        return result
    except:
        # Best-effort convention: '' on any parse failure.
        return ''
@@ -85,7 +102,10 @@ def getCover(htmlcode):
return result
def getDirector(a):
    """Extract the scenario/director credit from a DLsite product page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns the first credited name, or '' when the row is absent.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    # NOTE: a stale pre-edit lookup against the Japanese header "シナリオ"
    # with an unguarded [0] index was removed here — it ran before the try
    # block and could raise an uncaught IndexError on zh-cn pages.
    try:
        # "剧情" (scenario) row on zh-cn locale pages.
        result = html.xpath('//th[contains(text(),"剧情")]/../td/a/text()')[0]
    except:
        result = ''
    return result
def getOutline(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
@@ -96,36 +116,52 @@ def getOutline(htmlcode):
return str(total).strip(" ['']").replace("', '', '",r'\n').replace("', '",r'\n').strip(", '', '")
def getSeries(a):
    """Extract the series name from a DLsite product page.

    a: raw HTML text of the product page (zh-cn locale).
    Returns the first matching name string, or '' when neither row exists.
    """
    html = etree.fromstring(a, etree.HTMLParser())
    # NOTE: the pre-edit body (a "声優" xpath followed by an immediate
    # return) was removed here — it returned before the block below,
    # leaving the entire new lookup unreachable dead code.
    try:
        try:
            # "系列名" (series name) row, preferred when present.
            result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
        except:
            # Fall back to the "社团名" (circle name) row.
            result = html.xpath('//th[contains(text(),"社团名")]/../td/span[1]/a/text()')[0]
    except:
        # Neither row found: best-effort convention returns ''.
        result = ''
    return result
def main(number):
    """Scrape a DLsite work page and return its metadata as a JSON string.

    number: DLsite product id (e.g. 'VJ013178'); upper-cased before use.
    Returns a JSON string (sorted keys, 4-space indent). On ANY failure the
    fallback payload {"title": ""} is returned — callers in this project
    detect a failed scrape by the empty title.

    NOTE: the diff view interleaved the pre-edit body (no try wrapper, no
    locale cookie, duplicated dict literal) with the new one; only the new
    body is kept here.
    """
    try:
        number = number.upper()
        # zh-cn locale cookie so the xpath headers ("声优", "系列名", ...)
        # used by the helpers above match the served page.
        htmlcode = get_html('https://www.dlsite.com/pro/work/=/product_id/' + number + '.html',
                            cookies={'locale': 'zh-cn'})
        dic = {
            'actor': getActor(htmlcode),
            'title': getTitle(htmlcode),
            'studio': getStudio(htmlcode),
            'outline': getOutline(htmlcode),
            'runtime': '',
            'director': getDirector(htmlcode),
            'release': getRelease(htmlcode),
            'number': number,
            'cover': 'https:' + getCover(htmlcode),
            'cover_small': '',
            'imagecut': 0,
            'tag': getTag(htmlcode),
            'label': getLabel(htmlcode),
            'year': getYear(getRelease(htmlcode)),  # str(re.search('\d{4}',getRelease(a)).group()),
            'actor_photo': '',
            'website': 'https://www.dlsite.com/pro/work/=/product_id/' + number + '.html',
            'source': 'dlsite.py',
            'series': getSeries(htmlcode),
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'))  # .encode('UTF-8')
        return js
    except:
        # Network or parse failure: collapse to the project's standard
        # "not found" payload rather than crashing the caller.
        data = {
            "title": "",
        }
        js = json.dumps(
            data, ensure_ascii=False, sort_keys=True, indent=4, separators=(",", ":")
        )
        return js
# main('DV-1562')
# Legacy interactive pause, kept commented out for manual debugging:
# input("[+][+]Press enter key to exit; you can check the error message before you exit.")
# Debug entry point: prints the scraped JSON for sample product ids.
if __name__ == "__main__":
    print(main('VJ013479'))
    print(main('VJ013178'))

View File

@@ -1,5 +1,7 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import json
import re
from urllib.parse import urlencode

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import re
from lxml import etree#need install
import json

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import json
from bs4 import BeautifulSoup
from lxml import html

View File

@@ -1,10 +1,13 @@
import sys
sys.path.append('../')
import re
from pyquery import PyQuery as pq#need install
from lxml import etree#need install
from bs4 import BeautifulSoup#need install
import json
from ADC_function import *
import fanza
from WebCrawler import fanza
def getActorPhoto(htmlcode): #//*[@id="star_qdt"]/li/a/img
soup = BeautifulSoup(htmlcode, 'lxml')

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import re
from lxml import etree
import json

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import json
import bs4
from bs4 import BeautifulSoup

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import re
from lxml import etree
import json

View File

@@ -1,3 +1,5 @@
import sys
sys.path.append('../')
import re
from lxml import etree
import json