Merge pull request #735 from lededev/fdls
dlsite.py: update to current website
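The change reworks the scraper's flow: main() now fetches the maniax product page with a zh_CN locale, parses it once with lxml, and hands the parsed tree to every getXxx() helper instead of each helper re-parsing the raw HTML. The title is no longer read from #work_name but recovered from the page <title>. A minimal sketch of that string handling, using a made-up title (the sample value is hypothetical, not taken from a real listing):

# Hypothetical <title> text; real pages end in " | DLsite" and carry the
# circle name in trailing brackets, which the updated getTitle() strips off.
title = "サンプル作品 [サンプルサークル] | DLsite"
result = title[:title.rfind(' | DLsite')]   # drop the " | DLsite" suffix
result = result[:result.rfind(' [')]        # drop the trailing "[circle]" part
print(result)                               # サンプル作品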
dlsite.py
@@ -1,15 +1,14 @@
 import re
 from lxml import etree
 import json
-from bs4 import BeautifulSoup
 import sys
 sys.path.append('../')
 from ADC_function import *
 # import sys
 # import io
 # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, errors = 'replace', line_buffering = True)
-#print(get_html('https://www.dlsite.com/pro/work/=/product_id/VJ013152.html'))
-#title //*[@id="work_name"]/a/text()
+#print(get_html('https://www.dlsite.com/maniax/work/=/product_id/VJ013152.html'))
+#title /html/head/title/text()
 #studio //th[contains(text(),"ブランド名")]/../td/span[1]/a/text()
 #release //th[contains(text(),"販売日")]/../td/a/text()
 #story //th[contains(text(),"シナリオ")]/../td/a/text()
@@ -18,14 +17,14 @@ from ADC_function import *
 #jianjie //*[@id="main_inner"]/div[3]/text()
 #photo //*[@id="work_left"]/div/div/div[2]/div/div[1]/div[1]/ul/li/img/@src
 
-#https://www.dlsite.com/pro/work/=/product_id/VJ013152.html
+#https://www.dlsite.com/maniax/work/=/product_id/VJ013152.html
 
-def getTitle(a):
-    html = etree.fromstring(a, etree.HTMLParser())
-    result = html.xpath('//*[@id="work_name"]/a/text()')[0]
+def getTitle(html):
+    result = str(html.xpath('/html/head/title/text()')[0])
+    result = result[:result.rfind(' | DLsite')]
+    result = result[:result.rfind(' [')]
     return result
-def getActor(a): # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getActor(html): # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
     try:
         result1 = html.xpath('//th[contains(text(),"声优")]/../td/a/text()')
     except:
@@ -38,8 +37,7 @@ def getActorPhoto(actor): #//*[@id="star_qdt"]/li/a/img
         p={i:''}
         d.update(p)
     return d
-def getStudio(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getStudio(html):
     try:
         try:
             result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
@@ -53,8 +51,7 @@ def getRuntime(a):
     result1 = str(html.xpath('//strong[contains(text(),"時長")]/../span/text()')).strip(" ['']")
     result2 = str(html.xpath('//strong[contains(text(),"時長")]/../span/a/text()')).strip(" ['']")
     return str(result1 + result2).strip('+').rstrip('mi')
-def getLabel(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getLabel(html):
     try:
         try:
             result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
@@ -69,12 +66,10 @@ def getYear(getRelease):
         return result
     except:
         return getRelease
-def getRelease(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getRelease(html):
     result1 = html.xpath('//th[contains(text(),"贩卖日")]/../td/a/text()')[0]
     return result1.replace('年','-').replace('月','-').replace('日','')
-def getTag(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getTag(html):
     try:
         result = html.xpath('//th[contains(text(),"分类")]/../td/div/a/text()')
         return result
@@ -96,26 +91,22 @@ def getCover_small(a, index=0):
     if not 'https' in result:
         result = 'https:' + result
     return result
-def getCover(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
-    result = html.xpath('//*[@id="work_left"]/div/div/div[2]/div/div[1]/div[1]/ul/li/img/@src')[0]
-    return result
-def getDirector(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getCover(html):
+    result = html.xpath('//*[@id="work_left"]/div/div/div[2]/div/div[1]/div[1]/ul/li[1]/picture/source/@srcset')[0]
+    return result.replace('.webp', '.jpg')
+def getDirector(html):
     try:
         result = html.xpath('//th[contains(text(),"剧情")]/../td/a/text()')[0]
     except:
         result = ''
     return result
-def getOutline(htmlcode):
-    html = etree.fromstring(htmlcode, etree.HTMLParser())
+def getOutline(html):
     total = []
-    result = html.xpath('//*[@id="main_inner"]/div[3]/text()')
+    result = html.xpath('//*[@class="work_parts_area"]/p/text()')
     for i in result:
         total.append(i.strip('\r\n'))
     return str(total).strip(" ['']").replace("', '', '",r'\n').replace("', '",r'\n').strip(", '', '")
-def getSeries(a):
-    html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
+def getSeries(html):
     try:
         try:
             result = html.xpath('//th[contains(text(),"系列名")]/../td/span[1]/a/text()')[0]
@@ -127,28 +118,28 @@ def getSeries(a):
 def main(number):
     try:
         number = number.upper()
-        htmlcode = get_html('https://www.dlsite.com/pro/work/=/product_id/' + number + '.html',
+        htmlcode = get_html('https://www.dlsite.com/maniax/work/=/product_id/' + number + '.html/?locale=zh_CN',
                             cookies={'locale': 'zh-cn'})
+        html = etree.fromstring(htmlcode, etree.HTMLParser())
         dic = {
-            'actor': getActor(htmlcode),
-            'title': getTitle(htmlcode),
-            'studio': getStudio(htmlcode),
-            'outline': getOutline(htmlcode),
+            'actor': getActor(html),
+            'title': getTitle(html),
+            'studio': getStudio(html),
+            'outline': getOutline(html),
             'runtime': '',
-            'director': getDirector(htmlcode),
-            'release': getRelease(htmlcode),
+            'director': getDirector(html),
+            'release': getRelease(html),
             'number': number,
-            'cover': 'https:' + getCover(htmlcode),
+            'cover': 'https:' + getCover(html),
             'cover_small': '',
             'imagecut': 0,
-            'tag': getTag(htmlcode),
-            'label': getLabel(htmlcode),
-            'year': getYear(getRelease(htmlcode)), # str(re.search('\d{4}',getRelease(a)).group()),
+            'tag': getTag(html),
+            'label': getLabel(html),
+            'year': getYear(getRelease(html)), # str(re.search('\d{4}',getRelease(a)).group()),
             'actor_photo': '',
-            'website': 'https://www.dlsite.com/pro/work/=/product_id/' + number + '.html',
+            'website': 'https://www.dlsite.com/maniax/work/=/product_id/' + number + '.html',
             'source': 'dlsite.py',
-            'series': getSeries(htmlcode),
+            'series': getSeries(html),
         }
         js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
         return js
@@ -166,4 +157,6 @@ def main(number):
 # main('DV-1562')
 # input("[+][+]Press enter key exit, you can check the error messge before you exit.\n[+][+]按回车键结束,你可以在结束之前查看和错误信息。")
 if __name__ == "__main__":
+    config.G_conf_override["debug_mode:switch"] = True
     print(main('VJ013178'))
+    print(main('RJ329607'))
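Note on the cover change: getCover() now reads the srcset of the first <picture><source> entry in the image slider and swaps the .webp extension for .jpg before main() prepends the https: scheme. A small sketch under that assumption; the HTML fragment and image path below are made up for illustration and are not a real DLsite URL.

from lxml import etree

# Hypothetical fragment standing in for the product page's image slider.
fragment = '<li><picture><source srcset="//img.dlsite.jp/sample/RJ329607_img_main.webp"></picture></li>'
html = etree.fromstring(fragment, etree.HTMLParser())
srcset = html.xpath('//picture/source/@srcset')[0]
cover = 'https:' + srcset.replace('.webp', '.jpg')
print(cover)  # https://img.dlsite.jp/sample/RJ329607_img_main.jpg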