11 Commits

Author      SHA1        Message                    Date
wenead99    02da503a2f  Update update_check.json   2019-06-20 19:13:38 +08:00
wenead99    31c5d5c314  Update update_check.json   2019-06-20 19:10:28 +08:00
wenead99    22e5b9aa44  Update update_check.json   2019-06-20 19:07:42 +08:00
wenead99    400e8c9678  Update update_check.json   2019-06-20 19:03:24 +08:00
wenead99    b06e744c0c  Beta 0.10.3 update check   2019-06-19 20:53:10 +08:00
wenead99    ddbfe7765b  Beta 10.3 update check     2019-06-19 20:50:44 +08:00
wenead99    c0f47fb712  Update README.md           2019-06-19 18:22:31 +08:00
wenead99    7b0e8bf5f7  Beta 10.2 Update           2019-06-19 18:21:19 +08:00
wenead99    fa8ea58fe6  Beta 10.2 Update           2019-06-19 18:20:30 +08:00
wenead99    8c824e5d29  Beta 10.2 Update           2019-06-19 18:20:02 +08:00
wenead99    764fba74ec  Beta 10.2 Update           2019-06-19 18:19:34 +08:00
9 changed files with 82 additions and 62 deletions

ADC_function.py
View File

@@ -1,8 +1,15 @@
 import requests
-from configparser import ConfigParser
+from configparser import RawConfigParser
 import os
 import re
-config = ConfigParser()
+# content = open('proxy.ini').read()
+# content = re.sub(r"\xfe\xff","", content)
+# content = re.sub(r"\xff\xfe","", content)
+# content = re.sub(r"\xef\xbb\xbf","", content)
+# open('BaseConfig.cfg', 'w').write(content)
+config = RawConfigParser()
 if os.path.exists('proxy.ini'):
     config.read('proxy.ini', encoding='UTF-8')
 else:
@@ -10,14 +17,14 @@ else:
         print("[proxy]",file=code)
         print("proxy=127.0.0.1:1080",file=code)
-def get_html(url): # core web-request function
+def get_html(url,cookies = None): # core web-request function
     if not str(config['proxy']['proxy']) == '':
         proxies = {
             "http" : "http://" + str(config['proxy']['proxy']),
             "https": "https://" + str(config['proxy']['proxy'])
         }
         headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36'}
-        getweb = requests.get(str(url), headers=headers, proxies=proxies)
+        getweb = requests.get(str(url), headers=headers, proxies=proxies,cookies=cookies)
         getweb.encoding = 'utf-8'
         # print(getweb.text)
         try:
@@ -27,7 +34,7 @@ def get_html(url): # core web-request function
     else:
         headers = {
             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-        getweb = requests.get(str(url), headers=headers)
+        getweb = requests.get(str(url), headers=headers,cookies=cookies)
         getweb.encoding = 'utf-8'
         try:
             return getweb.text
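
The new cookies parameter defaults to None, so existing call sites are unaffected; a minimal sketch of how callers can now pass site-specific cookies (URL and cookie values below are illustrative only):

    from ADC_function import get_html

    # Existing callers are unchanged: requests.get() treats cookies=None as "no cookies".
    html = get_html('https://www.javbus.com/ABC-123')

    # New callers can inject a cookie, e.g. an age-check token.
    html = get_html('https://www.mgstage.com/product/product_detail/SIRO-3552',
                    cookies={'adc': '1'})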

View File

@@ -19,8 +19,10 @@ def movie_lists():
     f2 = glob.glob(os.getcwd() + r"\*.mkv")
     # FLV
     g2 = glob.glob(os.getcwd() + r"\*.flv")
+    # TS
+    h2 = glob.glob(os.getcwd() + r"\*.ts")
-    total = a2+b2+c2+d2+e2+f2+g2
+    total = a2+b2+c2+d2+e2+f2+g2+h2
     return total
 def lists_from_test(custom_nuber): # movie list
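
Each new container format currently costs two more lines and another single-letter variable; a sketch of the same scan driven by an extension list (equivalent behavior, not the repo's code; os.path.join replaces the Windows-only backslash globs):

    import glob
    import os

    def movie_lists():
        # One glob per known container format, concatenated into one list.
        extensions = ['avi', 'rmvb', 'wmv', 'mov', 'mp4', 'mkv', 'flv', 'ts']
        total = []
        for ext in extensions:
            total += glob.glob(os.path.join(os.getcwd(), '*.' + ext))
        return total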

README.md
View File

@@ -61,7 +61,7 @@ pip install pillow
 >Movie file naming (files under the directory above): '['+number+']-'+title
 ## 3. About movie-number extraction failures or errors
-**Movies whose metadata can currently be extracted: amateur series with metadata on JAVBUS: 300Maan, 326scp, 326urf, 259luxu, the siro series, and the FC2 series**<br>
+**Movies whose metadata can currently be extracted: amateur series with metadata on JAVBUS (Japanese proxy required): 300Maan, 326scp, 326urf, 259luxu, siro, and the FC2 series**<br>
 >The next image is from Pockies' blog: https://pockies.github.io/2019/03/25/everaver-emby-kodi/ (used with the original author's permission)<br>
 ![](https://raw.githubusercontent.com/Pockies/pic/master/741f9461gy1g1cxc31t41j20i804zdgo.jpg)

View File

@@ -299,7 +299,7 @@ def cutImage():
         h = img.height
         img.save(path + '/' + naming_rule + '.png')
 def pasteFileToFolder(filepath, path): # file path, movie number, suffix, destination to move to
-    houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|avi|rmvb|wmv|mov|mp4|mkv|flv)$', filepath).group())
+    houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|TS|avi|rmvb|wmv|mov|mp4|mkv|flv|ts)$', filepath).group())
     os.rename(filepath, naming_rule + houzhui)
     shutil.move(naming_rule + houzhui, path)
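
The alternation now lists every extension twice, once per letter case; a sketch of the same match written with re.IGNORECASE (an alternative spelling, not the repo's code):

    import re

    def file_suffix(filepath):
        # Case-insensitive match of the trailing extension, leading '.' included.
        m = re.search(r'\.(avi|rmvb|wmv|mov|mp4|mkv|flv|ts)$', filepath, re.IGNORECASE)
        return m.group() if m else None  # the repo's code assumes a match and calls .group() directly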

View File

@@ -38,8 +38,8 @@ def getOutline(htmlcode,number): # get movie number
 #    result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[4]/p/text()')).replace("\\n",'',10000).strip(" ['']").replace("'",'',10000)
 #    return result
-def main(number):
-    str(number).lstrip("FC2-").lstrip("fc2-").lstrip("fc2_").lstrip("fc2-")
+def main(number2):
+    number=number2.replace('PPV','').replace('ppv','')
     htmlcode = ADC_function.get_html('http://fc2fans.club/html/FC2-' + number + '.html')
     dic = {
         'title': getTitle(htmlcode),
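
The replaced lstrip chain was a no-op: its result was never assigned, and lstrip strips a set of characters rather than a prefix. The new code binds the cleaned value before building the URL; a sketch of the normalization (the input value is hypothetical):

    number2 = '1134567PPV'  # hypothetical caller input
    number = number2.replace('PPV', '').replace('ppv', '')  # -> '1134567'
    url = 'http://fc2fans.club/html/FC2-' + number + '.html'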

View File

@@ -9,14 +9,7 @@ from bs4 import BeautifulSoup#need install
 from PIL import Image#need install
 import time
 import json
-def get_html(url): # core web-request function
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-    getweb = requests.get(str(url),timeout=10,headers=headers).text
-    try:
-        return getweb
-    except:
-        print("[-]Connect Failed! Please check your Proxy.")
+from ADC_function import *
 def getTitle(htmlcode): # get title
     doc = pq(htmlcode)
@@ -34,7 +27,6 @@ def getCover(htmlcode): # get cover link
     doc = pq(htmlcode)
     image = doc('a.bigImage')
     return image.attr('href')
-    print(image.attr('href'))
 def getRelease(htmlcode): # get release date
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
@@ -62,8 +54,10 @@ def getOutline(htmlcode): # get actors
     doc = pq(htmlcode)
     result = str(doc('tr td div.mg-b20.lh4 p.mg-b20').text())
     return result
+def getSerise(htmlcode):
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/a/text()')).strip(" ['']")
+    return result
 def getTag(htmlcode): # get actors
     tag = []
     soup = BeautifulSoup(htmlcode, 'lxml')
@@ -79,7 +73,7 @@ def main(number):
     htmlcode=get_html('https://www.javbus.com/'+number)
     dww_htmlcode=get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': str(re.search('\d{4}',getYear(htmlcode)).group()),
         'outline': getOutline(dww_htmlcode),
@@ -90,7 +84,8 @@ def main(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'imagecut': 1,
-        'tag': getTag(htmlcode)
+        'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
@@ -98,7 +93,7 @@ def main(number):
     htmlcode = get_html('https://www.javbus.com/' + number)
     dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
         'outline': getOutline(dww_htmlcode),
@@ -109,7 +104,8 @@ def main(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'imagecut': 1,
-        'tag': getTag(htmlcode)
+        'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
     }
     js2 = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
     return js2
@@ -118,11 +114,12 @@ def main(number):
 def main_uncensored(number):
     htmlcode = get_html('https://www.javbus.com/' + number)
     dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
-        'outline': getOutline(htmlcode),
+        'outline': getOutline(dww_htmlcode),
         'runtime': getRuntime(htmlcode),
         'director': getDirector(htmlcode),
         'actor': getActor(htmlcode),
@@ -130,6 +127,7 @@ def main_uncensored(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
         'imagecut': 0,
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -138,7 +136,7 @@ def main_uncensored(number):
     number2 = number.replace('-', '_')
     htmlcode = get_html('https://www.javbus.com/' + number2)
     dic2 = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
         'outline': '',
@@ -149,6 +147,7 @@ def main_uncensored(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'tag': getTag(htmlcode),
+        'label':getSerise(htmlcode),
         'imagecut': 0,
     }
     js2 = json.dumps(dic2, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
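
The repeated re.sub('\w+-\d+-','',...) strips the movie-number prefix that JAVBUS embeds in its page titles; a short illustration of the pattern (the title value is made up):

    import re

    title = 'ABC-123-Some Title'            # hypothetical JAVBUS page title
    clean = re.sub(r'\w+-\d+-', '', title)  # -> 'Some Title'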

proxy.ini
View File

@@ -2,5 +2,5 @@
 proxy=127.0.0.1:1080
 [Name_Rule]
-location_rule='JAV_output/'+actor+'/'+title
+location_rule='JAV_output/'+actor+'/'+'['+number+']-'+title
 naming_rule=number
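
The two rules read like Python string expressions over the scraped fields; a sketch of the path the new location_rule would yield, assuming the rules are evaluated that way (field values are illustrative):

    actor = 'Some Actor'
    number = 'ABC-123'
    title = 'Some Title'
    location = 'JAV_output/' + actor + '/' + '[' + number + ']-' + title
    # -> 'JAV_output/Some Actor/[ABC-123]-Some Title'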

siro.py
View File

@@ -3,70 +3,74 @@ from lxml import etree
 import json
 import requests
 from bs4 import BeautifulSoup
-def get_html(url): # core web-request function
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-    cookies = {'adc':'1'}
-    getweb = requests.get(str(url),timeout=10,cookies=cookies,headers=headers).text
-    try:
-        return getweb
-    except:
-        print("[-]Connect Failed! Please check your Proxy.")
+from ADC_function import *
 def getTitle(a):
     html = etree.fromstring(a, etree.HTMLParser())
     result = str(html.xpath('//*[@id="center_column"]/div[2]/h1/text()')).strip(" ['']")
     return result
 def getActor(a): #//*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
-    html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[1]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    html = etree.fromstring(a, etree.HTMLParser()) #//table/tr[1]/td[1]/text()
+    result2=str(html.xpath('//table/tr[1]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[1]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1+result2).strip('+')
 def getStudio(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[2]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table[2]/tr[2]/td/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1+result2).strip('+')
 def getRuntime(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[3]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[3]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[3]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+').strip('mi')
+def getLabel(a):
+    html = etree.fromstring(a, etree.HTMLParser())
+    result2=str(html.xpath('//table/tr[6]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[6]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getNum(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[4]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[2]/td[4]/a/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[2]/td[4]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getYear(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    #result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[2]/td[5]/a/text()')).strip(" ['\\n ']")
+    result1=str(html.xpath('//table/tr[2]/td[5]/text()')).strip(" ['\\n ']")
+    return result2+result1
 def getRelease(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[5]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[5]/a/td[1]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getTag(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[9]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[8]/td[1]/a/text()')).strip(" ['\\n ']")
+    result1=str(html.xpath('//table/tr[8]/td[1]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getCover(htmlcode):
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('//*[@id="center_column"]/div[2]/div[1]/div/div/h2/img/@src')).strip(" ['']")
     return result
 def getDirector(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result = str(html.xpath('//table[2]/tr[7]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    result1 = str(html.xpath('//table/tr[2]/td[1]/text()')).strip(" ['\\n ']")
+    result2 = str(html.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getOutline(htmlcode):
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('//*[@id="introduction"]/dd/p[1]/text()')).strip(" ['']")
     return result
 def main(number):
-    htmlcode=get_html('https://www.mgstage.com/product/product_detail/'+str(number))
+    htmlcode=get_html('https://www.mgstage.com/product/product_detail/'+str(number),cookies={'adc':'1'})
     soup = BeautifulSoup(htmlcode, 'lxml')
     a = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ','')
     #print(a)
     dic = {
         'title': getTitle(htmlcode).replace("\\n",'').replace(' ',''),
         'studio': getStudio(a),
-        'year': getYear(a),
+        'year': str(re.search('\d{4}',getRelease(a)).group()),
         'outline': getOutline(htmlcode),
         'runtime': getRuntime(a),
         'director': getDirector(a),
@@ -75,7 +79,10 @@ def main(number):
         'number': number,
         'cover': getCover(htmlcode),
         'imagecut': 0,
-        'tag':' ',
+        'tag': getTag(a).replace("'\\n',",'').replace(' ', '').replace("\\n','\\n",','),
+        'label':getLabel(a)
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
     #print('https://www.mgstage.com/product/product_detail/'+str(number))
     return js
 #print(main('SIRO-3552'))
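
mgstage pages sit behind an age-confirmation wall; the adc=1 cookie that the deleted local get_html hard-coded is now passed per call through the shared ADC_function helper. A sketch of the request this amounts to when no proxy is configured (the User-Agent is shortened here):

    import requests

    url = 'https://www.mgstage.com/product/product_detail/SIRO-3552'
    resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'},
                        cookies={'adc': '1'})  # age-check cookie
    resp.encoding = 'utf-8'
    htmlcode = resp.text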

update_check.json Normal file
View File

@@ -0,0 +1,5 @@
+{
+    "version": "0.10.3",
+    "version_show":"Beta 10.3",
+    "download": "https://github.com/wenead99/AV_Data_Capture/releases"
+}
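
A minimal sketch of a client-side check against this file; the raw-file URL and the comparison are assumptions, and the repo's own updater may differ:

    import json
    import requests

    LOCAL_VERSION = '0.10.3'
    # Hypothetical raw URL for the JSON file above.
    URL = ('https://raw.githubusercontent.com/wenead99/'
           'AV_Data_Capture/master/update_check.json')

    info = json.loads(requests.get(URL).text)
    if info['version'] != LOCAL_VERSION:
        print('New version ' + info['version_show'] + ': ' + info['download'])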