48 Commits
0.10 ... 0.10.3

Author    SHA1        Message                                       Date
wenead99  02da503a2f  Update update_check.json                      2019-06-20 19:13:38 +08:00
wenead99  31c5d5c314  Update update_check.json                      2019-06-20 19:10:28 +08:00
wenead99  22e5b9aa44  Update update_check.json                      2019-06-20 19:07:42 +08:00
wenead99  400e8c9678  Update update_check.json                      2019-06-20 19:03:24 +08:00
wenead99  b06e744c0c  Beta 0.10.3 update check                      2019-06-19 20:53:10 +08:00
wenead99  ddbfe7765b  Beta 10.3 update check                        2019-06-19 20:50:44 +08:00
wenead99  c0f47fb712  Update README.md                              2019-06-19 18:22:31 +08:00
wenead99  7b0e8bf5f7  Beta 10.2 Update                              2019-06-19 18:21:19 +08:00
wenead99  fa8ea58fe6  Beta 10.2 Update                              2019-06-19 18:20:30 +08:00
wenead99  8c824e5d29  Beta 10.2 Update                              2019-06-19 18:20:02 +08:00
wenead99  764fba74ec  Beta 10.2 Update                              2019-06-19 18:19:34 +08:00
wenead99  36c436772c  Update README.md                              2019-06-19 13:43:04 +08:00
wenead99  897a621adc  Update README.md                              2019-06-19 13:42:19 +08:00
wenead99  1f5802cdb4  Update README.md                              2019-06-19 13:41:05 +08:00
wenead99  0a57e2bab6  Update README.md                              2019-06-19 11:03:44 +08:00
wenead99  3ddfe94f2b  Update README.md                              2019-06-19 11:02:31 +08:00
wenead99  c6fd5ac565  Update README.md                              2019-06-19 00:05:01 +08:00
wenead99  2a7cdcf12d  Update README.md                              2019-06-18 23:56:34 +08:00
wenead99  759e546534  Beta 10.1: fix FC2 metadata extraction error  2019-06-18 18:11:04 +08:00
wenead99  222337a5f0  Fix FC2 extraction error                      2019-06-18 18:02:01 +08:00
wenead99  9fb6122a9d  Update AV_Data_Capture.py                     2019-06-18 16:58:32 +08:00
wenead99  9f0c01d62e  Update README.md                              2019-06-18 16:57:39 +08:00
wenead99  6ed79d8fcb  Update README.md                              2019-06-18 16:56:22 +08:00
wenead99  abb53c3219  Update README.md                              2019-06-18 16:55:43 +08:00
wenead99  6578d807ca  Update README.md                              2019-06-18 16:55:10 +08:00
wenead99  e9acd32fd7  Update README.md                              2019-06-18 16:54:49 +08:00
wenead99  0c64165b49  Update README.md                              2019-06-18 16:53:45 +08:00
wenead99  6278659e55  Update README.md                              2019-06-18 16:53:11 +08:00
wenead99  ca2c97a98f  Update README.md                              2019-06-17 23:45:00 +08:00
wenead99  164cc464dc  Update README.md                              2019-06-17 23:40:17 +08:00
wenead99  faa99507ad  Update README.md                              2019-06-17 19:11:54 +08:00
wenead99  d7a48d2829  Update README.md                              2019-06-17 19:11:35 +08:00
wenead99  c40936f1c4  Update README.md                              2019-06-17 19:10:22 +08:00
wenead99  38b26d4161  Update README.md                              2019-06-17 19:09:55 +08:00
wenead99  e17dffba4e  Update README.md                              2019-06-17 18:34:26 +08:00
wenead99  ae1a91bf28  Update README.md                              2019-06-17 18:31:46 +08:00
wenead99  208c24b606  Update README.md                              2019-06-17 18:31:10 +08:00
wenead99  751450ebad  Update README.md                              2019-06-17 18:30:46 +08:00
wenead99  e429ca3c7d  Update README.md                              2019-06-17 18:29:31 +08:00
wenead99  9e26558666  Update README.md                              2019-06-17 18:26:11 +08:00
wenead99  759b30ec5c  Update README.md                              2019-06-17 18:24:20 +08:00
wenead99  b7c195b76e  Update README.md                              2019-06-17 18:17:37 +08:00
wenead99  7038fcf8ed  Update README.md                              2019-06-17 18:12:38 +08:00
wenead99  54041313dc  Add files via upload                          2019-06-17 18:04:04 +08:00
wenead99  47a29f6628  Update README.md                              2019-06-17 18:03:14 +08:00
wenead99  839610d230  Update README.md                              2019-06-17 16:53:03 +08:00
wenead99  a0b324c1a8  Update README.md                              2019-06-17 16:52:23 +08:00
wenead99  1996807702  Add files via upload                          2019-06-17 16:28:07 +08:00
9 changed files with 133 additions and 116 deletions

ADC_function.py

```diff
@@ -1,8 +1,15 @@
 import requests
-from configparser import ConfigParser
+from configparser import RawConfigParser
 import os
-config = ConfigParser()
+import re
+# content = open('proxy.ini').read()
+# content = re.sub(r"\xfe\xff","", content)
+# content = re.sub(r"\xff\xfe","", content)
+# content = re.sub(r"\xef\xbb\xbf","", content)
+# open('BaseConfig.cfg', 'w').write(content)
+config = RawConfigParser()
 if os.path.exists('proxy.ini'):
     config.read('proxy.ini', encoding='UTF-8')
 else:
@@ -10,14 +17,14 @@ else:
         print("[proxy]",file=code)
         print("proxy=127.0.0.1:1080",file=code)
-def get_html(url):  # core web request
+def get_html(url, cookies=None):  # core web request
     if not str(config['proxy']['proxy']) == '':
         proxies = {
             "http": "http://" + str(config['proxy']['proxy']),
             "https": "https://" + str(config['proxy']['proxy'])
         }
         headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36'}
-        getweb = requests.get(str(url), headers=headers, proxies=proxies)
+        getweb = requests.get(str(url), headers=headers, proxies=proxies,cookies=cookies)
         getweb.encoding = 'utf-8'
         # print(getweb.text)
         try:
@@ -27,7 +34,7 @@ def get_html(url):  # core web request
     else:
         headers = {
             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-        getweb = requests.get(str(url), headers=headers)
+        getweb = requests.get(str(url), headers=headers,cookies=cookies)
        getweb.encoding = 'utf-8'
        try:
            return getweb.text
```
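The switch from `ConfigParser` to `RawConfigParser` disables `%`-interpolation, so values read from `proxy.ini` come back verbatim, and the new optional `cookies` parameter lets callers pass site cookies through the shared request helper without touching existing call sites. A minimal sketch of the same pattern (the proxy address, User-Agent, and example URL are placeholders, not the project's exact values):

```python
import requests

def get_html(url, cookies=None, proxy=None):
    # cookies defaults to None, which requests.get accepts unchanged,
    # so existing callers that pass no cookies keep working.
    headers = {'User-Agent': 'Mozilla/5.0'}
    proxies = {'http': 'http://' + proxy, 'https': 'https://' + proxy} if proxy else None
    resp = requests.get(str(url), headers=headers, proxies=proxies, cookies=cookies, timeout=10)
    resp.encoding = 'utf-8'
    return resp.text

# Example: the mgstage scraper below passes its adult-check cookie this way.
# html = get_html('https://www.mgstage.com/product/product_detail/SIRO-3552', cookies={'adc': '1'})
```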

AV_Data_Capture.py

```diff
@@ -19,8 +19,10 @@ def movie_lists():
     f2 = glob.glob(os.getcwd() + r"\*.mkv")
     # FLV
     g2 = glob.glob(os.getcwd() + r"\*.flv")
+    # TS
+    h2 = glob.glob(os.getcwd() + r"\*.ts")
-    total = a2+b2+c2+d2+e2+f2+g2
+    total = a2+b2+c2+d2+e2+f2+g2+h2
     return total
 def lists_from_test(custom_nuber):  # movie list
@@ -51,8 +53,8 @@ if __name__ =='__main__':
         if '_' in i:
             os.rename(re.search(r'[^\\/:*?"<>|\r\n]+$', i).group(), rreplace(re.search(r'[^\\/:*?"<>|\r\n]+$', i).group(), '_', '-', 1))
             i = rreplace(re.search(r'[^\\/:*?"<>|\r\n]+$', i).group(), '_', '-', 1)
-        #os.system('python core.py' + ' "' + i + '"')  # launch via the .py file, for running from source
-        os.system('core.exe' + ' "' + i + '"')  # launch via the .exe, for the EXE release
+        os.system('python core.py' + ' "' + i + '"')  # launch via the .py file, for running from source
+        #os.system('core.exe' + ' "' + i + '"')  # launch via the .exe, for the EXE release
     print("[*]=====================================")
     print("[!]Cleaning empty folders")
```

README.md

````diff
@@ -1,36 +1,26 @@
-<h1 style="text-align:center">Japanese AV metadata scraper</h1>
-## About this software (~please leave a star, thanks)
-At this point I have downloaded more and more AV, which means it has to be managed centrally as a media library. There are two mainstream AV metadata fetchers, "EverAver" and "Javhelper": the former fetches fairly complete metadata but cannot batch-process, while the latter batch-processes but its metadata is less complete.
-So, combining the strengths of the two, I wrote this software for convenient management of a local AV collection and a better solo experience. What to do without a girlfriend ʅ(‾◡◝)ʃ
-### **Please read the instructions below carefully before use**
-**You can read this document alongside pockies' guide [Building a local AV media library](https://pockies.github.io/2019/03/25/everaver-emby-kodi/)**<br>
-**Official Telegram group: [click to join](https://t.me/AV_Data_Capture_Official)**<br>
-**Recommended usage: after following [How to use](#how-to-use), movies whose metadata cannot be fetched normally can be fixed up with [EverAver](http://everaver.blogspot.com/)**<br>
+## Foreword
+&emsp;&emsp;At this point I have downloaded more and more AV, which means it has to be managed centrally as a media library. There are two mainstream AV metadata fetchers, "EverAver" and "Javhelper": the former fetches fairly complete metadata but cannot batch-process, while the latter batch-processes but its metadata is less complete.<br>
+&emsp;&emsp;So, combining the strengths of the two, I wrote this software for convenient management of a local AV collection and a better solo experience.
+**Official Telegram group: https://t.me/AV_Data_Capture_Official**
+* [How to use](#how-to-use)
 ![](https://i.loli.net/2019/06/02/5cf2b5d0bbecf69019.png)
-## Software flowchart
-![](https://i.loli.net/2019/06/02/5cf2bb9a9e2d997635.png)
 # How to use
----------------
-The release build runs without a Python environment, so step 1 can be skipped
-### Quick tutorial:<br>
-**1. Put the software into the same directory as the movies<br>2. Set the proxy in the ini file<br>3. Run the software and wait for it to finish<br>4. Import JAV_output into KODI or EMBY.<br>See the tutorial below for details**
-Download (Windows): https://github.com/wenead99/AV_Data_Capture/releases
-## 1. Install the requests, pyquery, lxml, Beautifulsoup4 and pillow modules by entering the following commands one by one in CMD/terminal
+The release build runs without a Python environment, so step 1 can be skipped<br>
+Download (Windows only): https://github.com/wenead99/AV_Data_Capture/releases
+## 1. Install the required modules by entering the following commands one by one in CMD/terminal
 ```python
 pip install requests
 ```
 ###
 ```python
 pip install pyquery
 ```
 ###
 ```python
 pip install lxml
@@ -43,46 +33,62 @@ pip install Beautifulsoup4
 ```python
 pip install pillow
 ```
-###
-## 2. Set up a local proxy
-**For countries or regions with network censorship**
-Open ```proxy.ini``` and set the local proxy address and port on the ```proxy``` line under ```[proxy]``` (Shadowsocks/R and V2Ray local proxy ports are supported):
-```proxy=127.0.0.1:1080```
+## 2. Configuration
+#### 1. For countries or regions with network censorship
+Open ```proxy.ini``` and set the local proxy address and port on the ```proxy``` line under ```[proxy]``` (Shadowsocks/R and V2Ray local proxy ports are supported):<br>
+Example: ```proxy=127.0.0.1:1080```<br>
+**If you hit a timeout error, delete the address and port after proxy= and switch the VPN to global mode, or restart the computer, the VPN, or the network adapter**
+#### 2. (Optional) Set custom directory and movie renaming rules
+**Defaults are already provided**<br>
+##### Naming parameters<br>
+>title = movie title<br>
+>actor = actress<br>
+>studio = studio<br>
+>director = director<br>
+>release = release date<br>
+>year = release year<br>
+>number = ID number<br>
+>cover = cover link<br>
+>tag = genre<br>
+>outline = synopsis<br>
+>runtime = runtime<br>
+##### **Example**:<br>
+>Directory structure: 'JAV_output/'+actor+'/'+actor+' '+' ['+year+']'+title+' ['+number+']'<br>
+>Movie name (the file under the directory above): '['+number+']-'+title
 ## 3. About ID extraction failures and errors
->The next image is from Pockies' blog: https://pockies.github.io/2019/03/25/everaver-emby-kodi/ (used with the original author's permission)
+**Movies whose metadata can currently be extracted: movies with metadata on JAVBUS; amateur series (Japanese proxy required): 300Maan, 326scp, 326urf, 259luxu, siro, the FC2 series**<br>
+>The next image is from Pockies' blog: https://pockies.github.io/2019/03/25/everaver-emby-kodi/ (used with the original author's permission)<br>
 ![](https://raw.githubusercontent.com/Pockies/pic/master/741f9461gy1g1cxc31t41j20i804zdgo.jpg)
-**The author has refined the ID extraction mechanism; it is fairly robust and can extract IDs from file names like those above. If extraction fails or misbehaves, please name files by the following rule**
+The author has refined the ID extraction mechanism; it is fairly robust and can extract IDs from file names like those above. If extraction fails or misbehaves, please name files by the following rule<br>
+**Damn it, stop feeding the software so many no-name junk videos; give it a chance to live**
 ```
 COSQ-004.mp4
 ```
->Use an underscore or hyphen ("_", "-") in the middle of the file name, with nothing but the ID if possible; that lets the software fetch metadata more reliably
->To rename many movies at once, you can use ReNamer to batch-rename
->Official site: http://www.den4b.com/products/renamer
+Use an underscore or hyphen ("_", "-") in the middle of the file name, with nothing but the ID if possible; that lets the software fetch metadata more reliably
+To rename many movies at once, you can use [ReNamer](http://www.den4b.com/products/renamer) to batch-rename
 ## 4. Copy the software into the same directory as the AV
 ## 5. Run ```AV_Data_capture.py/.exe```
->**You can also drag a single movie onto the core program**
+You can also drag a single movie onto the core program<br>
 ![](https://i.loli.net/2019/06/02/5cf2b5d03640e73201.gif)
 ## 6. The software automatically moves movies whose metadata was fetched successfully into the JAV_output folder, sorted by actress; movies that failed are moved into the failed folder.
 ## 7. Import the JAV_output folder into EMBY or KODI and pick movies by their covers.
 ## 8. The output files look like this
 ![](https://i.loli.net/2019/06/02/5cf2b5cfd1b0226763.png)
 ![](https://i.loli.net/2019/06/02/5cf2b5cfd1b0246492.png)
 ![](https://i.loli.net/2019/06/02/5cf2b5d009e4930666.png)
+## Software flowchart
+![](https://i.loli.net/2019/06/02/5cf2bb9a9e2d997635.png)
````
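The naming parameters documented in the new README section are combined by plain string concatenation. A worked example of what the README's two rules produce (the metadata values are invented for illustration):

```python
# Invented sample metadata, for illustration only.
actor = 'Actress Name'
year = '2019'
title = 'Some Title'
number = 'COSQ-004'

# Directory rule from the README example:
location = 'JAV_output/' + actor + '/' + actor + ' ' + ' [' + year + ']' + title + ' [' + number + ']'
print(location)  # JAV_output/Actress Name/Actress Name  [2019]Some Title [COSQ-004]

# File-name rule from the README example:
name = '[' + number + ']-' + title
print(name)      # [COSQ-004]-Some Title
```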

core.py

```diff
@@ -107,9 +107,9 @@ def getNumberFromFilename(filepath):
     except:  # rules that need no regular expression
         # ====================fc2fans_club.py===================
         if 'fc2' in filename:
-            json_data = json.loads(fc2fans_club.main(file_number))
+            json_data = json.loads(fc2fans_club.main(file_number.strip('fc2_').strip('fc2-')))
         elif 'FC2' in filename:
-            json_data = json.loads(fc2fans_club.main(file_number))
+            json_data = json.loads(fc2fans_club.main(file_number.strip('FC2_').strip('FC2-')))
         #========================siro.py========================
         elif 'siro' in filename:
@@ -299,7 +299,7 @@ def cutImage():
         h = img.height
         img.save(path + '/' + naming_rule + '.png')
 def pasteFileToFolder(filepath, path):  # file path, ID number, extension, destination
-    houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|avi|rmvb|wmv|mov|mp4|mkv|flv)$', filepath).group())
+    houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|TS|avi|rmvb|wmv|mov|mp4|mkv|flv|ts)$', filepath).group())
     os.rename(filepath, naming_rule + houzhui)
     shutil.move(naming_rule + houzhui, path)
```
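One caveat on this fix: `str.strip()` treats its argument as a character set and trims from both ends, not as a literal prefix, so `file_number.strip('FC2_')` can also eat a trailing '2' in the ID. `str.removeprefix()` (Python 3.9+) or a small helper removes only the exact prefix; a sketch:

```python
def remove_prefix(s, prefix):
    # Removes the exact leading substring only, unlike str.strip().
    return s[len(prefix):] if s.startswith(prefix) else s

# strip() removes any of the characters F, C, 2, - from BOTH ends:
print('FC2-962242'.strip('FC2-'))           # '96224' -- trailing '2' eaten
print(remove_prefix('FC2-962242', 'FC2-'))  # '962242'
```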

fc2fans_club.py

```diff
@@ -7,7 +7,8 @@ def getTitle(htmlcode):  # get studio
     #print(htmlcode)
     html = etree.fromstring(htmlcode,etree.HTMLParser())
     result = str(html.xpath('/html/body/div[2]/div/div[1]/h3/text()')).strip(" ['']")
-    return result
+    result2 = str(re.sub('\D{2}2-\d+','',result)).replace(' ','',1)
+    return result2
 def getStudio(htmlcode):  # get studio
     html = etree.fromstring(htmlcode,etree.HTMLParser())
     result = str(html.xpath('/html/body/div[2]/div/div[1]/h5[3]/a[1]/text()')).strip(" ['']")
@@ -37,8 +38,8 @@ def getOutline(htmlcode,number):  # get ID number
     # result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[4]/p/text()')).replace("\\n",'',10000).strip(" ['']").replace("'",'',10000)
     # return result
-def main(number):
-    str(number).lstrip("FC2-").lstrip("fc2-").lstrip("fc2_").lstrip("fc2-")
+def main(number2):
+    number=number2.replace('PPV','').replace('ppv','')
     htmlcode = ADC_function.get_html('http://fc2fans.club/html/FC2-' + number + '.html')
     dic = {
         'title': getTitle(htmlcode),
```
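The new `re.sub('\D{2}2-\d+', '', ...)` in `getTitle()` drops the leading "FC2-nnnnnn" token the site embeds in its page titles, and `replace(' ', '', 1)` then removes the space it leaves behind. A quick demonstration on a made-up title:

```python
import re

raw_title = 'FC2-962245 Some Title'  # made-up title in the site's shape
cleaned = re.sub(r'\D{2}2-\d+', '', raw_title).replace(' ', '', 1)
print(cleaned)  # 'Some Title'
```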

javbus.py (filename inferred from the javbus.com scraping below)

```diff
@@ -9,14 +9,7 @@ from bs4 import BeautifulSoup#need install
 from PIL import Image#need install
 import time
 import json
+from ADC_function import *
-def get_html(url):  # core web request
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-    getweb = requests.get(str(url),timeout=10,headers=headers).text
-    try:
-        return getweb
-    except:
-        print("[-]Connect Failed! Please check your Proxy.")
 def getTitle(htmlcode):  # get title
     doc = pq(htmlcode)
@@ -34,7 +27,6 @@ def getCover(htmlcode):  # get cover link
     doc = pq(htmlcode)
     image = doc('a.bigImage')
     return image.attr('href')
-    print(image.attr('href'))
 def getRelease(htmlcode):  # get release date
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
@@ -62,8 +54,10 @@ def getOutline(htmlcode):  # get actors
     doc = pq(htmlcode)
     result = str(doc('tr td div.mg-b20.lh4 p.mg-b20').text())
     return result
+def getSerise(htmlcode):
+    html = etree.fromstring(htmlcode, etree.HTMLParser())
+    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/a/text()')).strip(" ['']")
+    return result
 def getTag(htmlcode):  # get actors
     tag = []
     soup = BeautifulSoup(htmlcode, 'lxml')
@@ -79,7 +73,7 @@ def main(number):
     htmlcode=get_html('https://www.javbus.com/'+number)
     dww_htmlcode=get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': str(re.search('\d{4}',getYear(htmlcode)).group()),
         'outline': getOutline(dww_htmlcode),
@@ -90,7 +84,8 @@ def main(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'imagecut': 1,
-        'tag': getTag(htmlcode)
+        'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
@@ -98,7 +93,7 @@ def main(number):
     htmlcode = get_html('https://www.javbus.com/' + number)
     dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
         'outline': getOutline(dww_htmlcode),
@@ -109,7 +104,8 @@ def main(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'imagecut': 1,
-        'tag': getTag(htmlcode)
+        'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
     }
     js2 = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
     return js2
@@ -118,11 +114,12 @@ def main(number):
 def main_uncensored(number):
     htmlcode = get_html('https://www.javbus.com/' + number)
+    dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
     dic = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
-        'outline': getOutline(htmlcode),
+        'outline': getOutline(dww_htmlcode),
         'runtime': getRuntime(htmlcode),
         'director': getDirector(htmlcode),
         'actor': getActor(htmlcode),
@@ -130,6 +127,7 @@ def main_uncensored(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'tag': getTag(htmlcode),
+        'label': getSerise(htmlcode),
         'imagecut': 0,
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
@@ -138,7 +136,7 @@ def main_uncensored(number):
     number2 = number.replace('-', '_')
     htmlcode = get_html('https://www.javbus.com/' + number2)
     dic2 = {
-        'title': getTitle(htmlcode),
+        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))),
         'studio': getStudio(htmlcode),
         'year': getYear(htmlcode),
         'outline': '',
@@ -149,6 +147,7 @@ def main_uncensored(number):
         'number': getNum(htmlcode),
         'cover': getCover(htmlcode),
         'tag': getTag(htmlcode),
+        'label':getSerise(htmlcode),
         'imagecut': 0,
     }
     js2 = json.dumps(dic2, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
```
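With `getSerise()` (the source's own spelling) feeding a new `label` key, every dict this module serializes now carries the series/label alongside the existing fields, and callers such as core.py recover it with `json.loads`. A sketch of that round trip, with invented sample values; the field set is taken from the diff:

```python
import json

# Shape of what main() returns after this change (sample values invented).
dic = {
    'title': 'Some Title',
    'studio': 'Some Studio',
    'year': '2019',
    'number': 'ABP-123',
    'cover': 'https://example.com/cover.jpg',
    'tag': ['tag1', 'tag2'],
    'label': 'Some Series',   # new key populated by getSerise()
    'imagecut': 1,
}
js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'))

data = json.loads(js)   # core.py decodes the JSON string back into a dict
print(data['label'])    # 'Some Series'
```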

proxy.ini

```diff
@@ -2,15 +2,5 @@
 proxy=127.0.0.1:1080
 [Name_Rule]
-location_rule='JAV_output/'+actor+'/'+actor+'-'+year+'- '+title+'['+number+']'
-naming_rule=actor+'-'+year+'- '+title+'['+number+']'
+location_rule='JAV_output/'+actor+'/'+'['+number+']-'+title
+naming_rule=number
-#============================================================
-#
-# title=title, number=ID number, stdio=studio, year=year, director=director, actor=actress, outline=synopsis
-# runtime=runtime, release=release date, cover=cover URL, tag=genre
-# Example:
-# rule=str('JAV_output/'+actor+'/'+actor+' '+' ['+year+']'+title+' ['+number+']')
-# End of example
-# Custom characters (strings) must be quoted '(content)'; use + to concatenate strings
-#
-#============================================================
```
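These rules live in the `[Name_Rule]` section and are read back with the `RawConfigParser` the code now uses; because it performs no `%`-interpolation, the quoted rule expressions come back exactly as written. A minimal reading sketch (assumes `proxy.ini` sits in the working directory):

```python
from configparser import RawConfigParser

config = RawConfigParser()
config.read('proxy.ini', encoding='UTF-8')

proxy = config['proxy']['proxy']                      # e.g. '127.0.0.1:1080'
location_rule = config['Name_Rule']['location_rule']  # the raw rule expression
naming_rule = config['Name_Rule']['naming_rule']      # e.g. 'number'
print(proxy, location_rule, naming_rule)
```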

siro.py

```diff
@@ -3,70 +3,74 @@ from lxml import etree
 import json
 import requests
 from bs4 import BeautifulSoup
-def get_html(url):  # core web request
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
-    cookies = {'adc':'1'}
-    getweb = requests.get(str(url),timeout=10,cookies=cookies,headers=headers).text
-    try:
-        return getweb
-    except:
-        print("[-]Connect Failed! Please check your Proxy.")
+from ADC_function import *
 def getTitle(a):
     html = etree.fromstring(a, etree.HTMLParser())
     result = str(html.xpath('//*[@id="center_column"]/div[2]/h1/text()')).strip(" ['']")
     return result
 def getActor(a):  #//*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
-    html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[1]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    html = etree.fromstring(a, etree.HTMLParser())  #//table/tr[1]/td[1]/text()
+    result2=str(html.xpath('//table/tr[1]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[1]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1+result2).strip('+')
 def getStudio(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[2]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table[2]/tr[2]/td/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1+result2).strip('+')
 def getRuntime(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[3]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[3]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[3]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+').strip('mi')
+def getLabel(a):
+    html = etree.fromstring(a, etree.HTMLParser())
+    result2=str(html.xpath('//table/tr[6]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[6]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getNum(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[4]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[2]/td[4]/a/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[2]/td[4]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getYear(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    #result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[2]/td[5]/a/text()')).strip(" ['\\n ']")
+    result1=str(html.xpath('//table/tr[2]/td[5]/text()')).strip(" ['\\n ']")
+    return result2+result1
 def getRelease(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[5]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[5]/td[1]/text()')).strip(" ['\\n ']")
+    result1 = str(html.xpath('//table/tr[5]/a/td[1]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getTag(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result=str(html.xpath('//table[2]/tr[9]/td/text()')).strip(" ['\\n ']")
-    return result
+    result2=str(html.xpath('//table/tr[8]/td[1]/a/text()')).strip(" ['\\n ']")
+    result1=str(html.xpath('//table/tr[8]/td[1]/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getCover(htmlcode):
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('//*[@id="center_column"]/div[2]/div[1]/div/div/h2/img/@src')).strip(" ['']")
     return result
 def getDirector(a):
     html = etree.fromstring(a, etree.HTMLParser())
-    result = str(html.xpath('//table[2]/tr[7]/td/a/text()')).strip(" ['\\n ']")
-    return result
+    result1 = str(html.xpath('//table/tr[2]/td[1]/text()')).strip(" ['\\n ']")
+    result2 = str(html.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
+    return str(result1 + result2).strip('+')
 def getOutline(htmlcode):
     html = etree.fromstring(htmlcode, etree.HTMLParser())
     result = str(html.xpath('//*[@id="introduction"]/dd/p[1]/text()')).strip(" ['']")
     return result
 def main(number):
-    htmlcode=get_html('https://www.mgstage.com/product/product_detail/'+str(number))
+    htmlcode=get_html('https://www.mgstage.com/product/product_detail/'+str(number),cookies={'adc':'1'})
     soup = BeautifulSoup(htmlcode, 'lxml')
     a = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ','')
+    #print(a)
     dic = {
         'title': getTitle(htmlcode).replace("\\n",'').replace(' ',''),
         'studio': getStudio(a),
-        'year': getYear(a),
+        'year': str(re.search('\d{4}',getRelease(a)).group()),
         'outline': getOutline(htmlcode),
         'runtime': getRuntime(a),
         'director': getDirector(a),
@@ -75,7 +79,10 @@ def main(number):
         'number': number,
         'cover': getCover(htmlcode),
         'imagecut': 0,
-        'tag':' ',
+        'tag': getTag(a).replace("'\\n',",'').replace(' ', '').replace("\\n','\\n",','),
+        'label':getLabel(a)
     }
     js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
+    #print('https://www.mgstage.com/product/product_detail/'+str(number))
     return js
+#print(main('SIRO-3552'))
```
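Every rewritten getter above follows one pattern: query the cell's bare text (`td/text()`) and its linked text (`td/a/text()`), then concatenate, since mgstage renders some fields as plain text and others as links. A self-contained illustration of that dual query on a made-up miniature table:

```python
from lxml import etree

# Miniature stand-in for the detail_data table: row 1 holds a link,
# row 2 holds plain text, so a single XPath would miss one of them.
html = etree.fromstring(
    '<table><tr><td><a>Linked Studio</a></td></tr>'
    '<tr><td>Plain 120min</td></tr></table>',
    etree.HTMLParser())

def cell(row):
    linked = str(html.xpath('//table/tr[%d]/td[1]/a/text()' % row)).strip(" ['']")
    plain = str(html.xpath('//table/tr[%d]/td[1]/text()' % row)).strip(" ['']")
    return linked + plain  # whichever form the site used, one query is non-empty

print(cell(1))  # Linked Studio
print(cell(2))  # Plain 120min
```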

update_check.json (new file)

```diff
@@ -0,0 +1,5 @@
+{
+    "version": "0.10.3",
+    "version_show":"Beta 10.3",
+    "download": "https://github.com/wenead99/AV_Data_Capture/releases"
+}
```
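This is the file the "update check" commits add: a version manifest the client can fetch and compare against itself. A sketch of such a check; the raw URL and the local version constant are assumptions for illustration, not taken from the diff:

```python
import json
import requests

LOCAL_VERSION = '0.10.2'  # assumed to be compiled into the client

def check_update(url):
    # Fetch the hosted manifest and compare the advertised version with
    # the running one; point the user at the download page if it is newer.
    data = json.loads(requests.get(url, timeout=10).text)
    if data['version'] != LOCAL_VERSION:
        print('[*]New version %s available: %s' % (data['version_show'], data['download']))
    else:
        print('[*]Already up to date.')

# Hypothetical raw URL for the manifest added in this commit range:
# check_update('https://raw.githubusercontent.com/wenead99/AV_Data_Capture/master/update_check.json')
```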