Add crawler named gcolle.py
This commit is contained in:
@@ -24,6 +24,7 @@ from . import carib
|
||||
from . import fc2club
|
||||
from . import mv91
|
||||
from . import madou
|
||||
from . import gcolle
|
||||
|
||||
|
||||
def get_data_state(data: dict) -> bool: # 元数据获取失败检测
|
||||
@@ -62,7 +63,8 @@ def get_data_from_json(file_number, oCC):
|
||||
"carib": carib.main,
|
||||
"fc2club": fc2club.main,
|
||||
"mv91": mv91.main,
|
||||
"madou": madou.main
|
||||
"madou": madou.main,
|
||||
"gcolle": gcolle.main,
|
||||
}
|
||||
|
||||
conf = config.getInstance()
|
||||
@@ -75,6 +77,8 @@ def get_data_from_json(file_number, oCC):
|
||||
if "carib" in sources and (re.match(r"^\d{6}-\d{3}", file_number)
|
||||
):
|
||||
sources.insert(0, sources.pop(sources.index("carib")))
|
||||
elif "gcolle" in sources and (re.search("\d{6}", file_number)):
|
||||
sources.insert(0, sources.pop(sources.index("gcolle")))
|
||||
elif re.match(r"^\d{5,}", file_number) or "heyzo" in lo_file_number:
|
||||
if "javdb" in sources:
|
||||
sources.insert(0, sources.pop(sources.index("javdb")))
|
||||
|
||||
99
WebCrawler/gcolle.py
Normal file
99
WebCrawler/gcolle.py
Normal file
@@ -0,0 +1,99 @@
|
||||
import sys
|
||||
sys.path.append('../')
|
||||
|
||||
from ADC_function import *
|
||||
from lxml import etree
|
||||
from requests_html import HTMLSession
|
||||
|
||||
class Crawler:
    """Thin wrapper around an lxml HTML tree with fail-soft XPath accessors.

    Getters never raise on a missing node: they return '' (or an empty
    list) so callers can build the metadata dict unconditionally.
    """

    def __init__(self, htmlcode):
        # Parse once; every getter queries this tree.
        self.html = etree.HTML(htmlcode)

    def getString(self, _xpath):
        """Return the first match of *_xpath*, or '' when the query string
        is empty or nothing matches."""
        if _xpath == "":
            return ""
        result = self.html.xpath(_xpath)
        # xpath() returns a list; guard the empty case explicitly instead
        # of indexing inside a bare except.
        return result[0] if result else ""

    def getStrings(self, _xpath):
        """Return the full list of matches (possibly empty)."""
        # xpath() already yields a list and returning it cannot raise, so
        # the original try/except here was dead code.
        return self.html.xpath(_xpath)

    def getOutline(self, _xpath):
        """Join all text matches with newlines; '' when nothing matches
        or the results are not joinable strings."""
        result = self.html.xpath(_xpath)
        try:
            return "\n".join(result)
        except TypeError:  # non-string nodes (e.g. element results)
            return ""
|
||||
def main(number):
    """Scrape metadata for a GColle product.

    :param number: product id, optionally prefixed with 'GCOLLE-'
                   (case-insensitive).
    :return: metadata dict; ``{'title': ''}`` signals a failed lookup
             (number on the page did not match the requested one).
    """
    config_file = config.getInstance()
    browser = HTMLSession()

    number = number.upper().replace('GCOLLE-', '')

    htmlcode = get_html_requests_html(browser, 'https://gcolle.net/product_info.php/products_id/' + number)
    html = etree.HTML(htmlcode)
    # Follow the R18 age-check "continue" link to reach the real product page.
    htmlcode = get_html_requests_html(browser, html.xpath('//*[@id="main_content"]/table[1]/tbody/tr/td[2]/table/tbody/tr/td/h4/a[2]/@href')[0])
    gcolle_crawler = Crawler(htmlcode)

    number_html = gcolle_crawler.getString('//td[contains(text(),"商品番号")]/../td[2]/text()')
    if number != number_html:
        if config_file.debug():
            print('[!]gcolle.py: number not match')
        return {'title': ''}

    # Extrafanart images appear either as bare <img> or wrapped in an <a>.
    extrafanart = gcolle_crawler.getStrings('//*[@id="cart_quantity"]/table/tr[3]/td/div/img/@src')
    if len(extrafanart) == 0:
        extrafanart = gcolle_crawler.getStrings('//*[@id="cart_quantity"]/table/tr[3]/td/div/a/img/@src')
    # The site serves protocol-relative URLs; prepend the scheme.
    extrafanart = ['https:' + url for url in extrafanart]

    # Registration datetime, e.g. '2021-08-12T...'. Raw strings for the
    # regexes, and guard the no-match case so a missing <time> node does
    # not abort the whole scrape with an IndexError.
    datetime_str = gcolle_crawler.getString('//td[contains(text(),"商品登録日")]/../td[2]/time/@datetime')
    year_match = re.findall(r'\d{4}', datetime_str)
    release_match = re.findall(r'\d{4}-\d{2}-\d{2}', datetime_str)

    # The uploader name fills studio/director/actor/label/series alike on
    # this site; query it once instead of five times.
    uploader = gcolle_crawler.getString('//td[contains(text(),"アップロード会員名")]/b/text()')
    cover_url = "https:" + gcolle_crawler.getString('//*[@id="cart_quantity"]/table/tr[3]/td/table/tr/td/a/@href')

    dic = {
        "title": gcolle_crawler.getString('//*[@id="cart_quantity"]/table/tr[1]/td/h1/text()'),
        "studio": uploader,
        "year": year_match[0] if year_match else '',
        "outline": gcolle_crawler.getOutline('//*[@id="cart_quantity"]/table/tr[3]/td/p/text()'),
        "runtime": '',
        "director": uploader,
        "actor": uploader,
        "release": release_match[0] if release_match else '',
        "number": "GCOLLE-" + str(number_html),
        "cover": cover_url,
        "thumb": cover_url,
        "trailer": '',
        "actor_photo": '',
        "imagecut": 4,  # 4 = censored title; cover is also cropped via face detection
        "tag": gcolle_crawler.getStrings('//*[@id="cart_quantity"]/table/tr[4]/td/a/text()'),
        "extrafanart": extrafanart,
        "label": uploader,
        "website": 'https://gcolle.net/product_info.php/products_id/' + number,
        "source": 'gcolle.py',
        "series": uploader,
        '无码': False,  # gcolle titles are treated as censored
    }
    return dic
|
||||
if __name__ == '__main__':
    # Ad-hoc smoke test against a handful of live product ids
    # (network access required; '814179' is intentionally hit twice).
    for test_number in ('840724', '840386', '838671', '814179', '834255', '814179'):
        main(test_number)
260
config.ini
260
config.ini
@@ -1,130 +1,130 @@
|
||||
# 详细教程请看
|
||||
# - https://github.com/yoshiko2/Movie_Data_Capture/wiki#%E9%85%8D%E7%BD%AEconfigini
|
||||
[common]
|
||||
main_mode=1
|
||||
source_folder=./
|
||||
failed_output_folder=failed
|
||||
success_output_folder=JAV_output
|
||||
link_mode=0
|
||||
; 0: 不刮削硬链接文件 1: 刮削硬链接文件
|
||||
scan_hardlink=0
|
||||
failed_move=1
|
||||
auto_exit=0
|
||||
translate_to_sc=0
|
||||
multi_threading=0
|
||||
;actor_gender value: female(♀) or male(♂) or both(♀ ♂) or all(♂ ♀ ⚧)
|
||||
actor_gender=female
|
||||
del_empty_folder=1
|
||||
; 跳过最近(默认:30)天新修改过的.NFO,可避免整理模式(main_mode=3)和软连接(soft_link=0)时
|
||||
; 反复刮削靠前的视频文件,0为处理所有视频文件
|
||||
nfo_skip_days=30
|
||||
; 处理完多少个视频文件后停止,0为处理所有视频文件
|
||||
stop_counter=0
|
||||
; 再运行延迟时间,单位:h时m分s秒 举例: 1h30m45s(1小时30分45秒) 45(45秒)
|
||||
; stop_counter不为零的条件下才有效,每处理stop_counter部影片后延迟rerun_delay秒再次运行
|
||||
rerun_delay=0
|
||||
; 以上三个参数配合使用可以以多次少量的方式刮削或整理数千个文件而不触发翻译或元数据站封禁
|
||||
ignore_failed_list=0
|
||||
download_only_missing_images=1
|
||||
mapping_table_validity=7
|
||||
|
||||
[proxy]
|
||||
;proxytype: http or socks5 or socks5h switch: 0 1
|
||||
switch=0
|
||||
type=socks5
|
||||
proxy=127.0.0.1:1080
|
||||
timeout=10
|
||||
retry=3
|
||||
cacert_file=
|
||||
|
||||
[Name_Rule]
|
||||
location_rule=actor+'/'+number
|
||||
naming_rule=number+'-'+title
|
||||
max_title_len=50
|
||||
|
||||
[update]
|
||||
update_check=1
|
||||
|
||||
[priority]
|
||||
website=javbus,airav,fanza,xcity,mgstage,fc2,avsox,dlsite,carib,fc2club,madou,mv91,javdb
|
||||
|
||||
[escape]
|
||||
literals=\()/
|
||||
folders=failed,JAV_output
|
||||
|
||||
[debug_mode]
|
||||
switch=0
|
||||
|
||||
; 机器翻译
|
||||
[translate]
|
||||
switch=0
|
||||
;可选项 google-free,azure
|
||||
engine=google-free
|
||||
; azure翻译密钥
|
||||
key=
|
||||
; 翻译延迟
|
||||
delay=1
|
||||
values=title,outline
|
||||
service_site=translate.google.cn
|
||||
|
||||
; 预告片
|
||||
[trailer]
|
||||
switch=0
|
||||
|
||||
; 用来确定是否是无码
|
||||
[uncensored]
|
||||
uncensored_prefix=S2M,BT,LAF,SMD,SMBD,SM3D2DBD,SKY-,SKYHD,CWP,CWDV,CWBD,CW3D2DBD,MKD,MKBD,MXBD,MK3D2DBD,MCB3DBD,MCBD,RHJ,MMDV
|
||||
|
||||
[media]
|
||||
; 影片后缀
|
||||
media_type=.mp4,.avi,.rmvb,.wmv,.mov,.mkv,.flv,.ts,.webm,.iso,.mpg,.m4v
|
||||
; 字幕后缀
|
||||
sub_type=.smi,.srt,.idx,.sub,.sup,.psb,.ssa,.ass,.usf,.xss,.ssf,.rt,.lrc,.sbv,.vtt,.ttml
|
||||
|
||||
; 水印
|
||||
[watermark]
|
||||
switch=1
|
||||
water=2
|
||||
; 左上 0, 右上 1, 右下 2, 左下 3
|
||||
|
||||
; 剧照
|
||||
[extrafanart]
|
||||
switch=1
|
||||
parallel_download=5
|
||||
extrafanart_folder=extrafanart
|
||||
|
||||
; 剧情简介
|
||||
[storyline]
|
||||
switch=1
|
||||
; website为javbus javdb avsox xcity carib时,site censored_site uncensored_site 为获取剧情简介信息的
|
||||
; 可选数据源站点列表。列表内站点同时并发查询,取值优先级由冒号前的序号决定,从小到大,数字小的站点没数据才会采用后面站点获得的。
|
||||
; 其中airavwiki airav avno1 58avgo是中文剧情简介,区别是airav只能查有码,avno1 airavwiki 有码无码都能查,
|
||||
; 58avgo只能查无码或者流出破解马赛克的影片(此功能没使用)。
|
||||
; xcity和amazon是日语的,由于amazon商城没有番号信息,选中对应DVD的准确率仅99.6%。如果三个列表全部为空则不查询,
|
||||
; 设置成不查询可大幅提高刮削速度。
|
||||
; site=
|
||||
site=1:avno1,4:airavwiki
|
||||
censored_site=2:airav,5:xcity,6:amazon
|
||||
uncensored_site=3:58avgo
|
||||
; 运行模式:0:顺序执行(最慢) 1:线程池(默认值) 2:进程池(启动开销比线程池大,并发站点越多越快)
|
||||
run_mode=1
|
||||
; show_result剧情简介调试信息 0关闭 1简略 2详细(详细部分不记入日志),剧情简介失效时可打开2查看原因
|
||||
show_result=0
|
||||
|
||||
; 繁简转换 繁简转换模式mode=0:不转换 1:繁转简 2:简转繁
|
||||
[cc_convert]
|
||||
mode=1
|
||||
vars=outline,series,studio,tag,title
|
||||
|
||||
[javdb]
|
||||
sites=38,39
|
||||
|
||||
; 人脸识别 locations_model=hog:方向梯度直方图(不太准确,速度快) cnn:深度学习模型(准确,需要GPU/CUDA,速度慢)
|
||||
; uncensored_only=0:对全部封面进行人脸识别 1:只识别无码封面,有码封面直接切右半部分
|
||||
; aways_imagecut=0:按各网站默认行为 1:总是裁剪封面,开启此项将无视[common]download_only_missing_images=1总是覆盖封面
|
||||
; 封面裁剪的宽高比可配置,公式为aspect_ratio/3。默认aspect_ratio=2.12: 适配大部分有码影片封面,前一版本默认为2/3即aspect_ratio=2
|
||||
[face]
|
||||
locations_model=hog
|
||||
uncensored_only=1
|
||||
aways_imagecut=0
|
||||
aspect_ratio=2.12
|
||||
# 详细教程请看
|
||||
# - https://github.com/yoshiko2/Movie_Data_Capture/wiki#%E9%85%8D%E7%BD%AEconfigini
|
||||
[common]
|
||||
main_mode=1
|
||||
source_folder=./
|
||||
failed_output_folder=failed
|
||||
success_output_folder=JAV_output
|
||||
link_mode=0
|
||||
; 0: 不刮削硬链接文件 1: 刮削硬链接文件
|
||||
scan_hardlink=0
|
||||
failed_move=1
|
||||
auto_exit=0
|
||||
translate_to_sc=0
|
||||
multi_threading=0
|
||||
;actor_gender value: female(♀) or male(♂) or both(♀ ♂) or all(♂ ♀ ⚧)
|
||||
actor_gender=female
|
||||
del_empty_folder=1
|
||||
; 跳过最近(默认:30)天新修改过的.NFO,可避免整理模式(main_mode=3)和软连接(soft_link=0)时
|
||||
; 反复刮削靠前的视频文件,0为处理所有视频文件
|
||||
nfo_skip_days=30
|
||||
; 处理完多少个视频文件后停止,0为处理所有视频文件
|
||||
stop_counter=0
|
||||
; 再运行延迟时间,单位:h时m分s秒 举例: 1h30m45s(1小时30分45秒) 45(45秒)
|
||||
; stop_counter不为零的条件下才有效,每处理stop_counter部影片后延迟rerun_delay秒再次运行
|
||||
rerun_delay=0
|
||||
; 以上三个参数配合使用可以以多次少量的方式刮削或整理数千个文件而不触发翻译或元数据站封禁
|
||||
ignore_failed_list=0
|
||||
download_only_missing_images=1
|
||||
mapping_table_validity=7
|
||||
|
||||
[proxy]
|
||||
;proxytype: http or socks5 or socks5h switch: 0 1
|
||||
switch=0
|
||||
type=socks5
|
||||
proxy=127.0.0.1:1080
|
||||
timeout=10
|
||||
retry=3
|
||||
cacert_file=
|
||||
|
||||
[Name_Rule]
|
||||
location_rule=actor+'/'+number
|
||||
naming_rule=number+'-'+title
|
||||
max_title_len=50
|
||||
|
||||
[update]
|
||||
update_check=1
|
||||
|
||||
[priority]
|
||||
website=javbus,airav,fanza,xcity,mgstage,fc2,avsox,dlsite,carib,fc2club,madou,mv91,javdb,gcolle
|
||||
|
||||
[escape]
|
||||
literals=\()/
|
||||
folders=failed,JAV_output
|
||||
|
||||
[debug_mode]
|
||||
switch=0
|
||||
|
||||
; 机器翻译
|
||||
[translate]
|
||||
switch=0
|
||||
;可选项 google-free,azure
|
||||
engine=google-free
|
||||
; azure翻译密钥
|
||||
key=
|
||||
; 翻译延迟
|
||||
delay=1
|
||||
values=title,outline
|
||||
service_site=translate.google.cn
|
||||
|
||||
; 预告片
|
||||
[trailer]
|
||||
switch=0
|
||||
|
||||
; 用来确定是否是无码
|
||||
[uncensored]
|
||||
uncensored_prefix=S2M,BT,LAF,SMD,SMBD,SM3D2DBD,SKY-,SKYHD,CWP,CWDV,CWBD,CW3D2DBD,MKD,MKBD,MXBD,MK3D2DBD,MCB3DBD,MCBD,RHJ,MMDV
|
||||
|
||||
[media]
|
||||
; 影片后缀
|
||||
media_type=.mp4,.avi,.rmvb,.wmv,.mov,.mkv,.flv,.ts,.webm,.iso,.mpg,.m4v
|
||||
; 字幕后缀
|
||||
sub_type=.smi,.srt,.idx,.sub,.sup,.psb,.ssa,.ass,.usf,.xss,.ssf,.rt,.lrc,.sbv,.vtt,.ttml
|
||||
|
||||
; 水印
|
||||
[watermark]
|
||||
switch=1
|
||||
water=2
|
||||
; 左上 0, 右上 1, 右下 2, 左下 3
|
||||
|
||||
; 剧照
|
||||
[extrafanart]
|
||||
switch=1
|
||||
parallel_download=5
|
||||
extrafanart_folder=extrafanart
|
||||
|
||||
; 剧情简介
|
||||
[storyline]
|
||||
switch=1
|
||||
; website为javbus javdb avsox xcity carib时,site censored_site uncensored_site 为获取剧情简介信息的
|
||||
; 可选数据源站点列表。列表内站点同时并发查询,取值优先级由冒号前的序号决定,从小到大,数字小的站点没数据才会采用后面站点获得的。
|
||||
; 其中airavwiki airav avno1 58avgo是中文剧情简介,区别是airav只能查有码,avno1 airavwiki 有码无码都能查,
|
||||
; 58avgo只能查无码或者流出破解马赛克的影片(此功能没使用)。
|
||||
; xcity和amazon是日语的,由于amazon商城没有番号信息,选中对应DVD的准确率仅99.6%。如果三个列表全部为空则不查询,
|
||||
; 设置成不查询可大幅提高刮削速度。
|
||||
; site=
|
||||
site=1:avno1,4:airavwiki
|
||||
censored_site=2:airav,5:xcity,6:amazon
|
||||
uncensored_site=3:58avgo
|
||||
; 运行模式:0:顺序执行(最慢) 1:线程池(默认值) 2:进程池(启动开销比线程池大,并发站点越多越快)
|
||||
run_mode=1
|
||||
; show_result剧情简介调试信息 0关闭 1简略 2详细(详细部分不记入日志),剧情简介失效时可打开2查看原因
|
||||
show_result=0
|
||||
|
||||
; 繁简转换 繁简转换模式mode=0:不转换 1:繁转简 2:简转繁
|
||||
[cc_convert]
|
||||
mode=1
|
||||
vars=outline,series,studio,tag,title
|
||||
|
||||
[javdb]
|
||||
sites=38,39
|
||||
|
||||
; 人脸识别 locations_model=hog:方向梯度直方图(不太准确,速度快) cnn:深度学习模型(准确,需要GPU/CUDA,速度慢)
|
||||
; uncensored_only=0:对全部封面进行人脸识别 1:只识别无码封面,有码封面直接切右半部分
|
||||
; aways_imagecut=0:按各网站默认行为 1:总是裁剪封面,开启此项将无视[common]download_only_missing_images=1总是覆盖封面
|
||||
; 封面裁剪的宽高比可配置,公式为aspect_ratio/3。默认aspect_ratio=2.12: 适配大部分有码影片封面,前一版本默认为2/3即aspect_ratio=2
|
||||
[face]
|
||||
locations_model=hog
|
||||
uncensored_only=1
|
||||
aways_imagecut=0
|
||||
aspect_ratio=2.12
|
||||
|
||||
Reference in New Issue
Block a user