155 Commits
0.1 ... 0.10.5

Author SHA1 Message Date
wenead99
6f684e67e2 Beta 0.15 更新 2019-06-22 00:34:36 +08:00
wenead99
18cf202b5b Update README.md 2019-06-21 23:59:15 +08:00
wenead99
54b2b71472 Update README.md 2019-06-21 23:58:12 +08:00
wenead99
44ba47bafc Update README.md 2019-06-21 23:55:23 +08:00
wenead99
7eb72634d8 Update README.md 2019-06-21 20:07:44 +08:00
wenead99
5787d3470a Update README.md 2019-06-21 20:05:53 +08:00
wenead99
1fce045ac2 Update README.md 2019-06-21 20:05:09 +08:00
wenead99
794aa74782 Update README.md 2019-06-21 20:03:07 +08:00
wenead99
b2e49a99a7 Update README.md 2019-06-21 20:01:58 +08:00
wenead99
d208d53375 Update README.md 2019-06-21 20:00:15 +08:00
wenead99
7158378eca Update README.md 2019-06-21 19:59:55 +08:00
wenead99
0961d8cbe4 Update README.md 2019-06-21 19:59:41 +08:00
wenead99
6ef5d11742 Update README.md 2019-06-21 19:57:03 +08:00
wenead99
45e1d8370c Beta 10.4 更新 2019-06-21 18:27:21 +08:00
wenead99
420f995977 Update README.md 2019-06-21 18:26:25 +08:00
wenead99
dbe1f91bd9 Update README.md 2019-06-21 18:23:59 +08:00
wenead99
770c5fcb1f Update update_check.json 2019-06-21 17:54:41 +08:00
wenead99
665d1ffe43 Beta 10.4 2019-06-21 15:40:02 +08:00
wenead99
14ed221152 Update README.md 2019-06-21 10:53:34 +08:00
wenead99
c41b9c1e32 Update README.md 2019-06-21 10:16:14 +08:00
wenead99
17d4d68cbe Update README.md 2019-06-21 10:00:25 +08:00
wenead99
b5a23fe430 Beta 10.3 Update 2019.6.20 2019-06-21 00:03:43 +08:00
wenead99
2747be4a21 Update README.md 2019-06-20 20:49:40 +08:00
wenead99
02da503a2f Update update_check.json 2019-06-20 19:13:38 +08:00
wenead99
31c5d5c314 Update update_check.json 2019-06-20 19:10:28 +08:00
wenead99
22e5b9aa44 Update update_check.json 2019-06-20 19:07:42 +08:00
wenead99
400e8c9678 Update update_check.json 2019-06-20 19:03:24 +08:00
wenead99
b06e744c0c Beta 0.10.3更新检测 2019-06-19 20:53:10 +08:00
wenead99
ddbfe7765b Beta 10.3更新检测 2019-06-19 20:50:44 +08:00
wenead99
c0f47fb712 Update README.md 2019-06-19 18:22:31 +08:00
wenead99
7b0e8bf5f7 Beta 10.2 Update 2019-06-19 18:21:19 +08:00
wenead99
fa8ea58fe6 Beta 10.2 Update 2019-06-19 18:20:30 +08:00
wenead99
8c824e5d29 Beta 10.2 Update 2019-06-19 18:20:02 +08:00
wenead99
764fba74ec Beta 10.2 Update 2019-06-19 18:19:34 +08:00
wenead99
36c436772c Update README.md 2019-06-19 13:43:04 +08:00
wenead99
897a621adc Update README.md 2019-06-19 13:42:19 +08:00
wenead99
1f5802cdb4 Update README.md 2019-06-19 13:41:05 +08:00
wenead99
0a57e2bab6 Update README.md 2019-06-19 11:03:44 +08:00
wenead99
3ddfe94f2b Update README.md 2019-06-19 11:02:31 +08:00
wenead99
c6fd5ac565 Update README.md 2019-06-19 00:05:01 +08:00
wenead99
2a7cdcf12d Update README.md 2019-06-18 23:56:34 +08:00
wenead99
759e546534 Beta 10.1 修复FC2元数据提取异常 2019-06-18 18:11:04 +08:00
wenead99
222337a5f0 修改FC2提取异常 2019-06-18 18:02:01 +08:00
wenead99
9fb6122a9d Update AV_Data_Capture.py 2019-06-18 16:58:32 +08:00
wenead99
9f0c01d62e Update README.md 2019-06-18 16:57:39 +08:00
wenead99
6ed79d8fcb Update README.md 2019-06-18 16:56:22 +08:00
wenead99
abb53c3219 Update README.md 2019-06-18 16:55:43 +08:00
wenead99
6578d807ca Update README.md 2019-06-18 16:55:10 +08:00
wenead99
e9acd32fd7 Update README.md 2019-06-18 16:54:49 +08:00
wenead99
0c64165b49 Update README.md 2019-06-18 16:53:45 +08:00
wenead99
6278659e55 Update README.md 2019-06-18 16:53:11 +08:00
wenead99
ca2c97a98f Update README.md 2019-06-17 23:45:00 +08:00
wenead99
164cc464dc Update README.md 2019-06-17 23:40:17 +08:00
wenead99
faa99507ad Update README.md 2019-06-17 19:11:54 +08:00
wenead99
d7a48d2829 Update README.md 2019-06-17 19:11:35 +08:00
wenead99
c40936f1c4 Update README.md 2019-06-17 19:10:22 +08:00
wenead99
38b26d4161 Update README.md 2019-06-17 19:09:55 +08:00
wenead99
e17dffba4e Update README.md 2019-06-17 18:34:26 +08:00
wenead99
ae1a91bf28 Update README.md 2019-06-17 18:31:46 +08:00
wenead99
208c24b606 Update README.md 2019-06-17 18:31:10 +08:00
wenead99
751450ebad Update README.md 2019-06-17 18:30:46 +08:00
wenead99
e429ca3c7d Update README.md 2019-06-17 18:29:31 +08:00
wenead99
9e26558666 Update README.md 2019-06-17 18:26:11 +08:00
wenead99
759b30ec5c Update README.md 2019-06-17 18:24:20 +08:00
wenead99
b7c195b76e Update README.md 2019-06-17 18:17:37 +08:00
wenead99
7038fcf8ed Update README.md 2019-06-17 18:12:38 +08:00
wenead99
54041313dc Add files via upload 2019-06-17 18:04:04 +08:00
wenead99
47a29f6628 Update README.md 2019-06-17 18:03:14 +08:00
wenead99
839610d230 Update README.md 2019-06-17 16:53:03 +08:00
wenead99
a0b324c1a8 Update README.md 2019-06-17 16:52:23 +08:00
wenead99
1996807702 Add files via upload 2019-06-17 16:28:07 +08:00
wenead99
e91b7a85bf 0.10 Beta10 Update 2019-06-17 16:14:17 +08:00
wenead99
dddaf5c74f Update README.md 2019-06-16 17:08:58 +08:00
wenead99
2a3935b221 Update README.md 2019-06-16 17:07:36 +08:00
wenead99
a5becea6c9 Update README.md 2019-06-16 15:39:06 +08:00
wenead99
1381b66619 Update README.md 2019-06-16 12:40:21 +08:00
wenead99
eb946d948f Update 0.9 2019-06-15 20:40:13 +08:00
wenead99
46087ba886 Update README.md 2019-06-11 19:10:57 +08:00
wenead99
f8764d1b81 Update README.md 2019-06-11 19:10:01 +08:00
wenead99
b9095452da Update README.md 2019-06-11 19:09:34 +08:00
wenead99
be8d23e782 Update README.md 2019-06-11 19:08:45 +08:00
wenead99
532c5bfbe3 0.6 更新:Javbus抓取的元数据支持标签功能,优化无码视频抓取 2019-06-02 23:10:01 +08:00
wenead99
cfccd00367 Update README.md 2019-06-02 02:16:36 +08:00
wenead99
56801d3910 Update README.md 2019-06-02 01:53:52 +08:00
wenead99
2d1efe272e Update README.md 2019-06-02 01:30:43 +08:00
wenead99
ff7ed13419 Update README.md 2019-06-01 21:22:05 +08:00
wenead99
7fade0fee3 Update README.md 2019-06-01 21:21:11 +08:00
wenead99
2dc2da4b41 0.5更新 2019-06-01 21:16:40 +08:00
wenead99
79679adbac Update README.md 2019-05-30 20:13:58 +08:00
wenead99
637ae06a14 Update README.md 2019-05-30 20:13:33 +08:00
wenead99
917614975e Beta 0.4 更新 *更好的错误提示 2019-05-30 19:15:02 +08:00
wenead99
e3d9955e5b Beta 0.4 更新 *新增:重命名文件时替换'_'为'-' 2019-05-30 19:13:45 +08:00
wenead99
69fa8d3f05 Beta 0.4 更新 *新增:替换'_'为'-' 2019-05-30 19:12:58 +08:00
wenead99
b5f82f77a1 0.3.2 Beta 完善错误提示 2019-05-29 12:10:16 +08:00
wenead99
5f627d24e0 0.3.1 Beta 修改无法导出nfo文件和介绍BUG 2019-05-29 09:07:17 +08:00
wenead99
6817cd2093 0.3.1 Beta 修改无法导出nfo文件BUG 2019-05-29 08:42:28 +08:00
wenead99
bda0c5becd 0.3 Beta 更好的代码风格,便于阅读 2019-05-28 19:47:22 +08:00
wenead99
abcf1a49e7 0.3 Beta 新增删除空目录功能 2019-05-28 19:45:57 +08:00
wenead99
23de02486c Update README.md 2019-05-28 09:44:43 +08:00
wenead99
8046fa1ef6 Update README.md 2019-05-28 08:23:35 +08:00
wenead99
0a6af7c41b Update README.md 2019-05-28 08:23:12 +08:00
wenead99
08cdabb59d Update README.md 2019-05-28 08:19:06 +08:00
wenead99
9c495c4a54 Update README.md 2019-05-27 23:57:39 +08:00
wenead99
51e0467be0 Update README.md 2019-05-27 23:31:15 +08:00
wenead99
59720d8c09 Update README.md 2019-05-27 23:25:01 +08:00
wenead99
150943694b Update README.md 2019-05-27 23:13:55 +08:00
wenead99
f40683e5ec Update README.md 2019-05-27 22:57:58 +08:00
wenead99
912e5ed0fb Update README.md 2019-05-27 22:52:25 +08:00
wenead99
f17994ecf8 Update README.md 2019-05-27 22:49:22 +08:00
wenead99
a414c8eae9 Update README.md 2019-05-27 22:46:14 +08:00
wenead99
8f0eed64c7 Update README.md 2019-05-27 21:57:41 +08:00
wenead99
5f8131f984 Update README.md 2019-05-27 21:55:25 +08:00
wenead99
a574d3a275 Update README.md 2019-05-27 18:37:33 +08:00
wenead99
1813102b0b Update README.md 2019-05-27 18:17:43 +08:00
wenead99
0ef0fed958 Add files via upload 2019-05-27 17:52:07 +08:00
wenead99
64be029c78 Delete readme5.png 2019-05-27 17:51:36 +08:00
wenead99
0f77b78133 Delete single.gif 2019-05-27 17:49:10 +08:00
wenead99
b87f41dc8e Delete readme5.png 2019-05-27 17:49:01 +08:00
wenead99
ecdacdbcb8 Delete readme4.PNG 2019-05-27 17:48:51 +08:00
wenead99
8308f66ae7 Delete readme3.PNG 2019-05-27 17:48:42 +08:00
wenead99
44c89590ee Delete readme2.PNG 2019-05-27 17:48:33 +08:00
wenead99
e7b0980524 Delete readme1.PNG 2019-05-27 17:48:23 +08:00
wenead99
1cc328bbd7 Delete flow_chart2.png 2019-05-27 17:48:14 +08:00
wenead99
66fbb5efbb Update README.md 2019-05-27 17:47:51 +08:00
wenead99
88fded90ee 这是教程图片文件夹
This is readms.md's images folder
2019-05-27 17:47:01 +08:00
wenead99
b055f6ca7f Create This is readms.md's images folder 2019-05-27 17:45:12 +08:00
wenead99
254e37d9cf Update README.md 2019-05-27 17:25:41 +08:00
wenead99
1c87c26f32 Update README.md 2019-05-27 17:24:18 +08:00
wenead99
10f13f882f 福利 2019-05-27 17:20:28 +08:00
wenead99
df17ee59f0 Update README.md 2019-05-27 17:19:53 +08:00
wenead99
07fa18080b Update README.md 2019-05-27 17:11:12 +08:00
wenead99
2bdf2ff283 Update README.md 2019-05-27 17:10:31 +08:00
wenead99
5445f1773c 教程gif 2019-05-27 17:08:34 +08:00
wenead99
b629fb4615 Add files via upload 2019-05-27 17:00:16 +08:00
wenead99
d50f5d2f34 Update README.md 2019-05-27 16:59:55 +08:00
wenead99
13f36ddf8b Delete flow_chart.png 2019-05-27 16:59:39 +08:00
wenead99
295ea2d174 新流程图 0.2 Beta 2019-05-27 16:58:19 +08:00
wenead99
ad4fc237b1 删除旧流程图 2019-05-27 16:57:55 +08:00
wenead99
55d8f02eee 删除旧流程图 2019-05-27 16:57:25 +08:00
wenead99
d6cbd3bdb2 新流程图 for 0.2 Beta 2019-05-27 16:56:57 +08:00
wenead99
99942745c4 Update README.md 2019-05-27 16:55:40 +08:00
wenead99
4daee989e6 新流程图 for 0.2 Beta 2019-05-27 16:55:20 +08:00
wenead99
99b04ef8b5 0.2 beta 更新
2019.5.27更新:
1.支持多媒体格式
2.更智能的程序终止机制
3.改善程序源码本体
2019-05-27 16:47:26 +08:00
wenead99
9707f1b38a 0.2 beta 更新
2019.5.27更新:
1.支持多媒体格式
2.更智能的程序终止机制
3.改善程序源码本体
2019-05-27 16:46:27 +08:00
wenead99
0a9bdd8729 Update README.md 2019-05-27 16:09:16 +08:00
wenead99
8a091a710e Update README.md 2019-05-27 15:39:08 +08:00
wenead99
c5a0ef1f77 Update README.md 2019-05-27 15:38:00 +08:00
wenead99
00b03a4ec4 优化readme 2 2019-05-27 15:37:36 +08:00
wenead99
6b4d9ca829 Add files via upload 2019-05-27 15:36:20 +08:00
wenead99
ef93ba5eb5 Update README.md
优化readme.md
2019-05-27 15:35:32 +08:00
wenead99
46b4557471 Update README.md 2019-05-27 00:01:52 +08:00
wenead99
6b2205887f Update AV_Data_Capture.py 2019-05-26 22:38:22 +08:00
wenead99
3e0dc60bad Update AV_Data_Capture.py 2019-05-26 22:37:11 +08:00
wenead99
424ede24eb Update README.md 2019-05-26 21:29:43 +08:00
wenead99
e307ec21e3 Update README.md 2019-05-26 20:38:00 +08:00
18 changed files with 880 additions and 196 deletions

42
ADC_function.py Normal file
View File

@@ -0,0 +1,42 @@
import requests
from configparser import RawConfigParser
import os
import re
# content = open('proxy.ini').read()
# content = re.sub(r"\xfe\xff","", content)
# content = re.sub(r"\xff\xfe","", content)
# content = re.sub(r"\xef\xbb\xbf","", content)
# open('BaseConfig.cfg', 'w').write(content)
# Load proxy settings from proxy.ini; create a default file on first run.
config = RawConfigParser()
if os.path.exists('proxy.ini'):
    config.read('proxy.ini', encoding='UTF-8')
else:
    with open("proxy.ini", "wt", encoding='UTF-8') as code:
        print("[proxy]", file=code)
        print("proxy=127.0.0.1:1080", file=code)
    # Bug fix: the original wrote the defaults but never loaded them, so the
    # first run crashed with KeyError('proxy') as soon as get_html() looked
    # up config['proxy']['proxy'].  Read the file we just created.
    config.read('proxy.ini', encoding='UTF-8')
def get_html(url, cookies=None):  # core HTTP fetch helper
    """Fetch *url* and return the decoded response body.

    Routes the request through the proxy configured in proxy.ini when one
    is set, otherwise connects directly.  Returns None when the request
    fails (a message is printed instead of raising).
    """
    proxy = str(config['proxy']['proxy'])
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
    try:
        if not proxy == '':
            # Send both schemes through the configured local proxy.
            proxies = {
                "http": "http://" + proxy,
                "https": "https://" + proxy,
            }
            getweb = requests.get(str(url), headers=headers, proxies=proxies, cookies=cookies)
        else:
            getweb = requests.get(str(url), headers=headers, cookies=cookies)
        getweb.encoding = 'utf-8'
        return getweb.text
    except requests.exceptions.RequestException:
        # Bug fix: the original only wrapped the `.text` access in try/except,
        # so a failed requests.get() (timeout, refused proxy, DNS error)
        # crashed the caller instead of printing the intended message.
        if not proxy == '':
            print('[-]Connected failed!:Proxy error')
        else:
            print("[-]Connect Failed.")

View File

@@ -1,13 +1,81 @@
import glob import glob
import os import os
import time import time
import re
import sys
from ADC_function import *
import json
#a=glob.glob(os.getcwd()+r"\*.py") version='0.10.4'
a=glob.glob(os.getcwd()+r"\*\**\*.mp4")
b=glob.glob(os.getcwd()+r"\*.mp4")
for i in b:
a.append(i)
def UpdateCheck():
    """Fetch the published update_check.json and announce a newer release.

    Prints download information when the remote version differs from the
    local `version` constant; silent otherwise.
    """
    raw = get_html('https://raw.githubusercontent.com/wenead99/AV_Data_Capture/master/update_check.json')
    info = json.loads(str(raw))
    if not version == info['version']:
        print('[*] * New update '+info['version']+' *')
        print('[*] * Download *')
        print('[*] '+info['download'])
        print('[*]=====================================')
def movie_lists():
    """Collect every supported video file in the current directory.

    Returns the glob matches for each extension, concatenated in a fixed
    order (mp4, avi, rmvb, wmv, mov, mkv, flv, ts) — identical to the
    original eight copy-pasted glob calls, just deduplicated into a loop.
    NOTE(review): the raw r"\*.ext" pattern is Windows-specific (literal
    backslash separator) — confirm before running on other platforms.
    """
    extensions = ('mp4', 'avi', 'rmvb', 'wmv', 'mov', 'mkv', 'flv', 'ts')
    total = []
    for ext in extensions:
        total.extend(glob.glob(os.getcwd() + r"\*." + ext))
    return total
def lists_from_test(custom_nuber):  # 电影列表
    """Wrap a single movie identifier in a one-element list (test helper)."""
    return [custom_nuber]
def CEF(path):
    """Delete empty sub-folders directly under *path*.

    Attempts os.removedirs() on every entry; non-empty directories (and
    plain files) raise OSError and are kept.  The original used a bare
    `except` with a dummy assignment, which also swallowed unrelated
    errors such as KeyboardInterrupt.
    """
    files = os.listdir(path)  # entries (files and folders) under path
    for file in files:
        target = path + '/' + file
        try:
            # removedirs only succeeds on empty directories, then walks up
            # removing empty parents — any non-empty level stops it.
            os.removedirs(target)
            print('[+]Deleting empty folder', target)
        except OSError:
            pass  # not empty, or not a directory: leave it alone
def rreplace(self, old, new, *max):
    """Replace occurrences of *old* with *new*, scanning from the right.

    An optional trailing integer limits how many occurrences are replaced;
    without it (or with a non-numeric value) every occurrence is replaced.
    """
    limit = len(self)
    if max:
        candidate = max[0]
        if str(candidate).isdigit():
            limit = candidate
    pieces = self.rsplit(old, limit)
    return new.join(pieces)
if __name__ == '__main__':
    # Entry point: print the banner, check for updates, then hand every
    # detected movie file to core.py one at a time.
    print('[*]===========AV Data Capture===========')
    print('[*] Version '+version)
    print('[*]=====================================')
    UpdateCheck()
    os.chdir(os.getcwd())
    for movie in movie_lists():
        if '_' in movie:
            # Swap the rightmost '_' for '-' in the bare filename before
            # passing it on, and rename the file on disk to match.
            base = re.search(r'[^\\/:*?"<>|\r\n]+$', movie).group()
            os.rename(base, rreplace(base, '_', '-', 1))
            movie = rreplace(base, '_', '-', 1)
        os.system('python core.py' + ' "' + movie + '"')  # run from the .py source
        #os.system('core.exe' + ' "' + movie + '"')  # alternative: packaged EXE build
    print("[*]=====================================")
    print("[!]Cleaning empty folders")
    CEF('JAV_output')
    print("[+]All finished!!!")
    input("[+][+]Press enter key exit, you can check the error messge before you exit.\n[+][+]按回车键结束,你可以在结束之前查看错误信息。")

125
README.md
View File

@@ -1,28 +1,111 @@
日本AV元数据抓取工具 # AV Data Capture 日本AV元数据刮削器
======================== # 目录
**关于本软件** * [前言](#前言)
* [捐助二维码](#捐助二维码)
* [免责声明](#免责声明)
* [如何使用](#如何使用)
* [简要教程](#简要教程)
# 前言
&emsp;&emsp;目前我下的AV越来越多也意味着AV要集中地管理形成媒体库。现在有两款主流的AV元数据获取器"EverAver"和"Javhelper"。前者的优点是元数据获取比较全,缺点是不能批量处理;后者优点是可以批量处理,但是元数据不够全。<br>
&emsp;&emsp;为此综合上述软件特点我写出了本软件为了方便的管理本地AV和更好的手冲体验。
目前我下的AV越来越多也意味着AV要集中地管理形成媒体库。现在有两款主流的AV元数据获取器"EverAver"和"Javhelper"。前者的优点是元数据获取比较全,缺点是不能批量处理;后者优点是可以批量处理,但是元数据不够全。 **可以结合pockies大神的[ 打造本地AV毛片媒体库 ](https://pockies.github.io/2019/03/25/everaver-emby-kodi/)看本文档**<br>
**tg官方电报群:[ 点击进群](https://t.me/AV_Data_Capture_Official)**<br>
**推荐用法: 按照 [如何使用](#如何使用) 使用该软件后,对于不能正常获取元数据的电影可以用[ Everaver ](http://everaver.blogspot.com/)来补救**<br>
![](https://i.loli.net/2019/06/02/5cf2b5d0bbecf69019.png)
为此我写出了本软件为了方便的管理本地AV和更好的手冲体验。 # 捐助二维码
如果你觉得本软件好用,可以考虑捐助作者,多少钱无所谓,不强求,你的支持就是我的动力,非常感谢您的捐助
![](https://i.loli.net/2019/06/21/5d0cb02ca489d19393.png)
**如何使用** # 免责声明
1. 请安装requests,pyquery,lxml,Beautifulsoup,pillow模块,可在CMD逐条输入以下命令安装 1.本软件仅供技术交流,学术交流使用<br>
`pip install requests` 2.本软件不提供任何有关淫秽色情的影视下载方式<br>
`pip install pyquery` 3.使用者使用该软件产生的一切法律后果由使用者承担<br>
`pip install lxml` 4.该软件禁止任何商用行为<br>
`pip install beautifulsoup4`
`pip install pillow`
2. 你的AV在被软件管理前最好命名为番号:**COSQ-004.mp4**
文件名中间要有减号"-",没有多余元数据只有番号为最佳,可以让软件更好获取元数据
![](readme1.PNG) # 如何使用
release的程序可脱离python环境运行可跳过第一步<br>
下载地址(仅限Windows):https://github.com/wenead99/AV_Data_Capture/releases
### 简要教程:<br>
**1.把软件拉到和电影的同一目录<br>2.设置ini文件的代理路由器拥有自动代理功能的可以把proxy=后面内容去掉)<br>3.运行软件等待完成<br>4.把JAV_output导入至KODI,EMBY中。<br>详细请看以下教程**
## 1.请安装模块,在CMD/终端逐条输入以下命令安装
```python
pip install requests
```
###
```python
pip install pyquery
```
###
```python
pip install lxml
```
###
```python
pip install Beautifulsoup4
```
###
```python
pip install pillow
```
###
## 2.配置
#### 1.针对网络审查国家或地区
打开```proxy.ini```,在```[proxy]```下的```proxy```行设置本地代理地址和端口支持Shadowsocks/R,V2RAY本地代理端口:<br>
例子:```proxy=127.0.0.1:1080```<br>
**如果遇到timeout错误可以把文件的proxy=后面的地址和端口删除并开启vpn全局模式或者重启电脑vpn网卡**
#### 2.(可选)设置自定义目录和影片重命名规则
**已有默认配置**<br>
##### 命名参数<br>
>title = 片名<br>
>actor = 演员<br>
>studio = 公司<br>
>director = 导演<br>
>release = 发售日<br>
>year = 发行年份<br>
>number = 番号<br>
>cover = 封面链接<br>
>tag = 类型<br>
>outline = 简介<br>
>runtime = 时长<br>
##### **例子**:<br>
>目录结构:'JAV_output/'+actor+'/'+actor+' '+' ['+year+']'+title+' ['+number+']'<br>
>影片命名(上面目录之下的文件):'['+number+']-'+title
## 3.关于番号提取失败或者异常
**目前可以提取元素的影片:JAVBUS上有元数据的电影素人系列(需要日本代理):300Maan,259luxu,siro等,FC2系列**<br>
>下一张图片来自Pockies的blog:https://pockies.github.io/2019/03/25/everaver-emby-kodi/ 原作者已授权<br>
![](https://raw.githubusercontent.com/Pockies/pic/master/741f9461gy1g1cxc31t41j20i804zdgo.jpg)
目前作者已经完善了番号提取机制,功能较为强大,可提取上述文件名的的番号,如果出现提取失败或者异常的情况,请用以下规则命名<br>
**妈蛋不要喂软件那么多野鸡片子,不让软件好好活了,操**
```
COSQ-004.mp4
```
文件名中间要有下划线或者减号"_","-",没有多余的内容只有番号为最佳,可以让软件更好获取元数据
对于多影片重命名,可以用[ReNamer](http://www.den4b.com/products/renamer)来批量重命名
## 4.把软件拷贝和AV的统一目录下
## 5.运行 ```AV_Data_capture.py/.exe```
你也可以把单个影片拖动到core程序<br>
![](https://i.loli.net/2019/06/02/5cf2b5d03640e73201.gif)
## 6.软件会自动把元数据获取成功的电影移动到JAV_output文件夹中根据女优分类失败的电影移动到failed文件夹中。
## 7.把JAV_output文件夹导入到EMBY,KODI中根据封面选片子享受手冲乐趣
## 8.输出的文件如下
![](https://i.loli.net/2019/06/02/5cf2b5cfd1b0226763.png)
![](https://i.loli.net/2019/06/02/5cf2b5cfd1b0246492.png)
![](https://i.loli.net/2019/06/02/5cf2b5d009e4930666.png)
## 软件流程图
![](https://i.loli.net/2019/06/02/5cf2bb9a9e2d997635.png)
3. 把软件拷贝到AV的所在目录下运行程序中国大陆用户必须挂VPNShsadowsocks开全局代理
4. 运行AV_Data_capture.py
5. 软件会自动把元数据获取成功的电影移动到JAV_output文件夹中根据女优分类失败的电影移动到failed文件夹中。
![](readme2.PNG)
![](readme3.PNG)
![](readme4.PNG)

395
core.py
View File

@@ -1,186 +1,297 @@
import re import re
import requests #need install
from pyquery import PyQuery as pq#need install
from lxml import etree#need install
import os import os
import os.path import os.path
import shutil import shutil
from bs4 import BeautifulSoup#need install from PIL import Image
from PIL import Image#need install
import time import time
import javbus
import json
import fc2fans_club
import siro
from ADC_function import *
from configparser import ConfigParser
def get_html(url): #初始化全局变量
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'} title=''
getweb = requests.get(str(url),proxies={"http": "http://127.0.0.1:2334","https": "https://127.0.0.1:2334"},timeout=5,headers=headers).text studio=''
year=''
outline=''
runtime=''
director=''
actor_list=[]
actor=''
release=''
number=''
cover=''
imagecut=''
tag=[]
naming_rule =''#eval(config['Name_Rule']['naming_rule'])
location_rule=''#eval(config['Name_Rule']['location_rule'])
#=====================本地文件处理===========================
def argparse_get_file():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", help="Write the file path on here")
args = parser.parse_args()
return args.file
def CreatFailedFolder():
if not os.path.exists('failed/'): # 新建failed文件夹
try: try:
return getweb os.makedirs('failed/')
except:
print("[-]failed!can not be make folder 'failed'\n[-](Please run as Administrator)")
os._exit(0)
def getNumberFromFilename(filepath):
global title
global studio
global year
global outline
global runtime
global director
global actor_list
global actor
global release
global number
global cover
global imagecut
global tag
global naming_rule
global location_rule
#================================================获取文件番号================================================
try: #试图提取番号
# ====番号获取主程序====
try: # 普通提取番号 主要处理包含减号-的番号
filepath.strip('22-sht.me').strip('-HD').strip('-hd')
filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath)) # 去除文件名中文件名
file_number = re.search('\w+-\d+', filename).group()
except: # 提取不含减号-的番号
try: # 提取东京热番号格式 n1087
filename1 = str(re.sub("h26\d", "", filepath)).strip('Tokyo-hot').strip('tokyo-hot')
filename0 = str(re.sub(".*?\.com-\d+", "", filename1)).strip('_')
file_number = str(re.search('n\d{4}', filename0).group(0))
except: # 提取无减号番号
filename1 = str(re.sub("h26\d", "", filepath)) # 去除h264/265
filename0 = str(re.sub(".*?\.com-\d+", "", filename1))
file_number2 = str(re.match('\w+', filename0).group())
file_number = str(file_number2.replace(re.match("^[A-Za-z]+", file_number2).group(),re.match("^[A-Za-z]+", file_number2).group() + '-'))
if not re.search('\w-', file_number).group() == 'None':
file_number = re.search('\w+-\w+', filename).group()
#上面是插入减号-到番号中
print("[!]Making Data for [" + filename + "],the number is [" + file_number + "]")
# ====番号获取主程序=结束===
except Exception as e: #番号提取异常
print('[-]'+str(os.path.basename(filepath))+' Cannot catch the number :')
print('[-]' + str(os.path.basename(filepath)) + ' :', e)
print('[-]Move ' + os.path.basename(filepath) + ' to failed folder')
shutil.move(filepath, str(os.getcwd()) + '/' + 'failed/')
os._exit(0)
except IOError as e2:
print('[-]' + str(os.path.basename(filepath)) + ' Cannot catch the number :')
print('[-]' + str(os.path.basename(filepath)) + ' :',e2)
print('[-]Move ' + os.path.basename(filepath) + ' to failed folder')
shutil.move(filepath, str(os.getcwd()) + '/' + 'failed/')
os._exit(0)
try:
# ================================================网站规则添加开始================================================
try: #添加 需要 正则表达式的规则
#=======================javbus.py=======================
if re.search('^\d{5,}', file_number).group() in filename:
json_data = json.loads(javbus.main_uncensored(file_number))
except: #添加 无需 正则表达式的规则
# ====================fc2fans_club.py===================
if 'fc2' in filename:
json_data = json.loads(fc2fans_club.main(file_number.strip('fc2_').strip('fc2-').strip('ppv-').strip('PPV-')))
elif 'FC2' in filename:
json_data = json.loads(fc2fans_club.main(file_number.strip('FC2_').strip('FC2-').strip('ppv-').strip('PPV-')))
#print(file_number.strip('FC2_').strip('FC2-').strip('ppv-').strip('PPV-'))
#=======================javbus.py=======================
else:
json_data = json.loads(javbus.main(file_number))
#================================================网站规则添加结束================================================
title = json_data['title']
studio = json_data['studio']
year = json_data['year']
outline = json_data['outline']
runtime = json_data['runtime']
director = json_data['director']
actor_list= str(json_data['actor']).strip("[ ]").replace("'",'').replace(" ",'').split(',') #字符串转列表
release = json_data['release']
number = json_data['number']
cover = json_data['cover']
imagecut = json_data['imagecut']
tag = str(json_data['tag']).strip("[ ]").replace("'",'').replace(" ",'').split(',') #字符串转列表
actor = str(actor_list).strip("[ ]").replace("'",'').replace(" ",'')
#====================处理异常字符====================== #\/:*?"<>|
#if "\\" in title or "/" in title or ":" in title or "*" in title or "?" in title or '"' in title or '<' in title or ">" in title or "|" in title or len(title) > 200:
# title = title.
naming_rule = eval(config['Name_Rule']['naming_rule'])
location_rule =eval(config['Name_Rule']['location_rule'])
except IOError as e:
print('[-]'+str(e))
print('[-]Move ' + filename + ' to failed folder')
shutil.move(filepath, str(os.getcwd())+'/'+'failed/')
os._exit(0)
except Exception as e: except Exception as e:
print(e) print('[-]'+str(e))
except IOError as e1: print('[-]Move ' + filename + ' to failed folder')
print(e1) shutil.move(filepath, str(os.getcwd())+'/'+'failed/')
#================================================ os._exit(0)
def getTitle(htmlcode): path = '' #设置path为全局变量后面移动文件要用
doc = pq(htmlcode) def creatFolder():
title=str(doc('div.container h3').text()).replace(' ','-') global path
return title if len(actor) > 240: #新建成功输出文件夹
def getStudio(htmlcode): path = location_rule.replace("'actor'","'超多人'",3).replace("actor","'超多人'",3) #path为影片+元数据所在目录
html = etree.fromstring(htmlcode,etree.HTMLParser()) #print(path)
result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[5]/a/text()')).strip(" ['']") else:
return result path = location_rule
def getYear(htmlcode): #print(path)
html = etree.fromstring(htmlcode,etree.HTMLParser()) if not os.path.exists(path):
result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']") os.makedirs(path)
result2 = str(re.search('\d{4}', result).group(0)) #=====================资源下载部分===========================
return result2
def getCover(htmlcode):
doc = pq(htmlcode)
image = doc('a.bigImage')
return image.attr('href')
print(image.attr('href'))
def getRelease(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
return result
def getRuntime(htmlcode):
soup = BeautifulSoup(htmlcode, 'lxml')
a = soup.find(text=re.compile('分鐘'))
return a
def getActor(htmlcode):
b=[]
soup=BeautifulSoup(htmlcode,'lxml')
a=soup.find_all(attrs={'class':'star-name'})
for i in a:
b.append(i.text)
return ",".join(b)
def getNum(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')).strip(" ['']")
return result
def getDirector(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/a/text()')).strip(" ['']")
return result
def getOutline(htmlcode):
doc = pq(htmlcode)
result = str(doc('tr td div.mg-b20.lh4 p.mg-b20').text())
return result
#================================================
def DownloadFileWithFilename(url,filename,path): #path = examle:photo , video.in the Project Folder! def DownloadFileWithFilename(url,filename,path): #path = examle:photo , video.in the Project Folder!
import requests config = ConfigParser()
import re config.read('proxy.ini', encoding='UTF-8')
proxy = str(config['proxy']['proxy'])
if not str(config['proxy']['proxy']) == '':
try: try:
if not os.path.exists(path): if not os.path.exists(path):
os.makedirs(path) os.makedirs(path)
r = requests.get(url) headers = {
with open(str(path) + "/"+str(filename), "wb") as code: 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
r = requests.get(url, headers=headers,proxies={"http": "http://" + str(proxy), "https": "https://" + str(proxy)})
with open(str(path) + "/" + filename, "wb") as code:
code.write(r.content) code.write(r.content)
# print('[+]Downloaded!',str(path) + "/"+str(filename)) # print(bytes(r),file=code)
except IOError as e: except IOError as e:
print("[-]Download Failed1!") print("[-]Movie not found in All website!")
print("[-]Movie not found in Javbus.com!") print("[-]" + filename, e)
print("[*]=====================================") # print("[*]=====================================")
return "failed" return "failed"
except Exception as e1: except Exception as e1:
print(e1) print(e1)
print("[-]Download Failed2!") print("[-]Download Failed2!")
time.sleep(3) time.sleep(3)
os._exit(0) os._exit(0)
else:
def PrintFiles(path):
try: try:
if not os.path.exists(path): if not os.path.exists(path):
os.makedirs(path) os.makedirs(path)
with open(path + "/" + getNum(html) + ".nfo", "wt", encoding='UTF-8') as code: headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
r = requests.get(url, headers=headers)
with open(str(path) + "/" + filename, "wb") as code:
code.write(r.content)
# print(bytes(r),file=code)
except IOError as e:
print("[-]Movie not found in All website!")
print("[-]" + filename, e)
# print("[*]=====================================")
return "failed"
except Exception as e1:
print(e1)
print("[-]Download Failed2!")
time.sleep(3)
os._exit(0)
def PrintFiles(path,naming_rule):
global title
try:
if not os.path.exists(path):
os.makedirs(path)
with open(path + "/" + number + ".nfo", "wt", encoding='UTF-8') as code:
print("<movie>", file=code) print("<movie>", file=code)
print(" <title>" + getTitle(html) + "</title>", file=code) print(" <title>" + naming_rule + "</title>", file=code)
print(" <set>", file=code) print(" <set>", file=code)
print(" </set>", file=code) print(" </set>", file=code)
print(" <studio>" + getStudio(html) + "+</studio>", file=code) print(" <studio>" + studio + "+</studio>", file=code)
print(" <year>" + getYear(html) + "</year>", file=code) print(" <year>" + year + "</year>", file=code)
print(" <outline>"+getOutline(html_outline)+"</outline>", file=code) print(" <outline>"+outline+"</outline>", file=code)
print(" <plot>"+getOutline(html_outline)+"</plot>", file=code) print(" <plot>"+outline+"</plot>", file=code)
print(" <runtime>"+str(getRuntime(html)).replace(" ","")+"</runtime>", file=code) print(" <runtime>"+str(runtime).replace(" ","")+"</runtime>", file=code)
print(" <director>" + getDirector(html) + "</director>", file=code) print(" <director>" + director + "</director>", file=code)
print(" <poster>" + getNum(html) + ".png</poster>", file=code) print(" <poster>" + naming_rule + ".png</poster>", file=code)
print(" <thumb>" + getNum(html) + ".png</thumb>", file=code) print(" <thumb>" + naming_rule + ".png</thumb>", file=code)
print(" <fanart>"+getNum(html) + '.jpg'+"</fanart>", file=code) print(" <fanart>"+naming_rule + '.jpg'+"</fanart>", file=code)
try:
for u in actor_list:
print(" <actor>", file=code) print(" <actor>", file=code)
print(" <name>" + getActor(html) + "</name>", file=code) print(" <name>" + u + "</name>", file=code)
print(" </actor>", file=code) print(" </actor>", file=code)
print(" <maker>" + getStudio(html) + "</maker>", file=code) except:
aaaa=''
print(" <maker>" + studio + "</maker>", file=code)
print(" <label>", file=code) print(" <label>", file=code)
print(" </label>", file=code) print(" </label>", file=code)
print(" <num>" + getNum(html) + "</num>", file=code) try:
print(" <release>" + getRelease(html) + "</release>", file=code) for i in tag:
print(" <cover>"+getCover(html)+"</cover>", file=code) print(" <tag>" + i + "</tag>", file=code)
print(" <website>" + "https://www.javbus.com/"+getNum(html) + "</website>", file=code) except:
aaaaa=''
try:
for i in tag:
print(" <genre>" + i + "</genre>", file=code)
except:
aaaaaaaa=''
print(" <num>" + number + "</num>", file=code)
print(" <release>" + release + "</release>", file=code)
print(" <cover>"+cover+"</cover>", file=code)
print(" <website>" + "https://www.javbus.com/"+number + "</website>", file=code)
print("</movie>", file=code) print("</movie>", file=code)
print("[+]Writeed! "+path + "/" + getNum(html) + ".nfo") print("[+]Writeed! "+path + "/" + number + ".nfo")
except IOError as e: except IOError as e:
print("[-]Write Failed!") print("[-]Write Failed!")
print(e) print(e)
except Exception as e1: except Exception as e1:
print(e1) print(e1)
print("[-]Write Failed!") print("[-]Write Failed!")
#================================================ def imageDownload(filepath): #封面是否下载成功否则移动到failed
if __name__ == '__main__': if DownloadFileWithFilename(cover,'Backdrop.jpg', path) == 'failed':
#命令行处理
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", help="Write the file path on here")
args = parser.parse_args()
filename=str(os.path.basename(args.file)) #\[\d{4}(\-|\/|.)\d{1,2}\1\d{1,2}\]
#去除文件名中日期
#print(filename)
deldate=str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ","",filename))
#print(deldate)
number=str(re.search('\w+-\w+',deldate).group())
#print(number)
#获取网页信息
html = get_html("https://www.javbus.com/"+str(number))
html_outline=get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid="+number.replace("-",''))
#处理超长文件夹名称
if len(getActor(html)) > 240:
path = 'JAV_output' + '/' + '超多人' + '/' + getNum(html)
else:
path = 'JAV_output' + '/' + getActor(html) + '/' + getNum(html)
if not os.path.exists(path):
os.makedirs(path)
#文件路径处理
#print(str(args))
filepath = str(args).replace("Namespace(file='",'').replace("')",'').replace('\\\\', '\\')
#print(filepath)
houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|avi|rmvb|wmv|mov|mp4|mkv|flv)$',filepath).group())
print("[!]Making Data for ["+number+houzhui+"]")
#下载元数据
if not os.path.exists('failed/'):
os.makedirs('failed/')
if not os.path.exists('failed/'):
print("[-]failed!Dirs can not be make (Please run as Administrator)")
os._exit(0)
if DownloadFileWithFilename(getCover(html), getNum(html) + '.jpg', path) == 'failed':
shutil.move(filepath, 'failed/')
time.sleep(3)
os._exit(0)
else:
if DownloadFileWithFilename(getCover(html), getNum(html) + '.jpg', path) == 'failed':
shutil.move(filepath, 'failed/') shutil.move(filepath, 'failed/')
os._exit(0) os._exit(0)
DownloadFileWithFilename(getCover(html), getNum(html) + '.jpg', path) DownloadFileWithFilename(cover, 'Backdrop.jpg', path)
print('[+]Downloaded!', path +'/'+getNum(html)+'.jpg') print('[+]Image Downloaded!', path +'/'+'Backdrop.jpg')
#切割图片做封面 def cutImage():
if imagecut == 1:
try: try:
img = Image.open(path + '/' + getNum(html) + '.jpg') img = Image.open(path + '/' + 'Backdrop' + '.jpg')
img2 = img.crop((421, 0, 800, 538)) imgSize = img.size
img2.save(path + '/' + getNum(html) + '.png') w = img.width
h = img.height
img2 = img.crop((w / 1.9, 0, w, h))
img2.save(path + '/' + number + '.png')
except: except:
print('[-]Cover cut failed!') print('[-]Cover cut failed!')
# 电源文件位置处理 else:
img = Image.open(path + '/' + 'Backdrop' + '.jpg')
w = img.width
h = img.height
img.save(path + '/' + number + '.png')
def pasteFileToFolder(filepath, path): #文件路径,番号,后缀,要移动至的位置
houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|TS|avi|rmvb|wmv|mov|mp4|mkv|flv|ts)$', filepath).group())
os.rename(filepath, number + houzhui) os.rename(filepath, number + houzhui)
shutil.move(number + houzhui, path) shutil.move(number + houzhui, path)
#处理元数据
PrintFiles(path) if __name__ == '__main__':
print('[!]Finished!') filepath=argparse_get_file() #影片的路径
time.sleep(3) CreatFailedFolder()
getNumberFromFilename(filepath) #定义番号
creatFolder() #创建文件夹
imageDownload(filepath) #creatFoder会返回番号路径
PrintFiles(path,naming_rule)#打印文件
cutImage() #裁剪图
pasteFileToFolder(filepath,path) #移动文件

84
fc2fans_club.py Normal file
View File

@@ -0,0 +1,84 @@
import re
from lxml import etree#need install
import json
import ADC_function
def getTitle(htmlcode):
    """Return the video title from the fc2fans.club page, with the leading
    "FC2-NNNN" id (and the single following space) stripped.

    NOTE(review): the original inline comment said "studio", but this parses
    the <h3> title heading.
    """
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    raw = str(tree.xpath('/html/body/div[2]/div/div[1]/h3/text()')).strip(" ['']")
    return str(re.sub('\D{2}2-\d+', '', raw)).replace(' ', '', 1)
def getActor(htmlcode):
    """Return the actor name scraped from the fc2fans.club page, or '' when
    the expected node is missing or the HTML cannot be parsed.

    A failed parse must not abort the whole scrape, so parse errors are
    reported as an empty actor instead of propagating.
    """
    try:
        html = etree.fromstring(htmlcode, etree.HTMLParser())
        return str(html.xpath('/html/body/div[2]/div/div[1]/h5[5]/a/text()')).strip(" ['']")
    except Exception:
        # `except Exception` (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate to the caller.
        return ''
def getStudio(htmlcode):
    """Return the studio/maker name from the fc2fans.club detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[2]/div/div[1]/h5[3]/a[1]/text()')
    return str(nodes).strip(" ['']")
def getNum(htmlcode):
    """Return the release id (番号) text from the detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')
    return str(nodes).strip(" ['']")
def getRelease(htmlcode2):
    """Return the release date text from the FC2 mall article page."""
    tree = etree.fromstring(htmlcode2, etree.HTMLParser())
    nodes = tree.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[2]/dl/dd[4]/text()')
    return str(nodes).strip(" ['']")
def getCover(htmlcode, number, htmlcode2):
    """Return the cover image URL.

    Prefers the image on the FC2 mall page (htmlcode2); falls back to the
    fc2fans.club slider image (htmlcode) when the mall page has none.
    *number* is unused but kept for interface compatibility.
    """
    mall = etree.fromstring(htmlcode2, etree.HTMLParser())
    src = str(mall.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[1]/a/img/@src')).strip(" ['']")
    if src:
        # Mall page carries a protocol-relative src.
        return 'http:' + src
    club = etree.fromstring(htmlcode, etree.HTMLParser())
    fallback = str(club.xpath('//*[@id="slider"]/ul[1]/li[1]/img/@src')).strip(" ['']")
    return 'http://fc2fans.club' + fallback
def getOutline(htmlcode2, number):
    """Return the plot summary text from the FC2 mall page.

    *number* is unused but kept for interface compatibility.
    """
    tree = etree.fromstring(htmlcode2, etree.HTMLParser())
    raw = str(tree.xpath('//*[@id="container"]/div[1]/div/article/section[4]/p/text()'))
    # Scrub the literal "\n" escapes and stray quotes left by str(list).
    return raw.replace("\\n", '', 10000).strip(" ['']").replace("'", '', 10000)
def getTag(htmlcode):
    """Return the tag list as one comma-ish string, quotes and spaces removed."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    raw = str(tree.xpath('/html/body/div[2]/div/div[1]/h5[4]/a/text()'))
    return raw.strip(" ['']").replace("'", '').replace(' ', '')
def getYear(release):
    """Extract the first 4-digit run (the year) from a release-date string.

    Example: '2019/06/22' -> '2019'. Returns '' when no 4-digit run exists.
    Replaces the original try/bare-except with an explicit match check so
    unrelated errors are no longer silently swallowed.
    """
    match = re.search(r'\d{4}', release)
    return match.group() if match else ''
def main(number2):
    """Scrape FC2 metadata for *number2* and return it as a JSON string.

    number2 -- the numeric FC2 id, optionally carrying a 'PPV'/'ppv' marker,
    which is stripped before use.
    """
    number = number2.replace('PPV', '').replace('ppv', '')
    # BUGFIX: the original chained .lstrip("FC2-")... strips *characters*
    # (F, C, 2, -, f, c, _), so a numeric id starting with '2' lost digits.
    # Remove at most one leading "fc2-"/"fc2_" prefix instead.
    fc2_id = re.sub('^[Ff][Cc]2[-_]', '', str(number))
    # Official FC2 mall page (htmlcode2) and the fc2fans.club mirror (htmlcode).
    htmlcode2 = ADC_function.get_html('http://adult.contents.fc2.com/article_search.php?id=' + fc2_id + '&utm_source=aff_php&utm_medium=source_code&utm_campaign=from_aff_php')
    htmlcode = ADC_function.get_html('http://fc2fans.club/html/FC2-' + number + '.html')
    dic = {
        'title': getTitle(htmlcode),
        'studio': getStudio(htmlcode),
        'year': '',  # str(re.search('\d{4}',getRelease(htmlcode2)).group()),
        # BUGFIX: outline/runtime/release were parsed from the wrong source --
        # getOutline and getRelease both expect the FC2 mall page (htmlcode2);
        # getRelease was even handed the bare id string instead of any HTML.
        'outline': getOutline(htmlcode2, number),
        'runtime': getYear(getRelease(htmlcode2)),
        'director': getStudio(htmlcode),  # site lists no director; studio reused
        'actor': getActor(htmlcode),
        'release': getRelease(htmlcode2),
        'number': 'FC2-' + number,
        'cover': getCover(htmlcode, number, htmlcode2),
        'imagecut': 0,  # 0 = use the cover as-is, no poster crop
        'tag': getTag(htmlcode),
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
    return js
#print(main('1051725'))

194
javbus.py Normal file
View File

@@ -0,0 +1,194 @@
import re
import requests #need install
from pyquery import PyQuery as pq#need install
from lxml import etree#need install
import os
import os.path
import shutil
from bs4 import BeautifulSoup#need install
from PIL import Image#need install
import time
import json
from ADC_function import *
import siro
def getTitle(htmlcode):
    """Return the javbus page title, spaces replaced with '-' and a leading
    'nNN-' id fragment removed."""
    doc = pq(htmlcode)
    title = str(doc('div.container h3').text()).replace(' ', '-')
    # re.sub with a constant, valid pattern and a str input cannot raise, so
    # the original try/except fallback around it was dead code.
    return re.sub('n\d+-', '', title)
def getStudio(htmlcode):
    """Return the studio/maker name from the javbus detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[5]/a/text()')
    return str(nodes).strip(" ['']")
def getYear(htmlcode):
    """Return the release-date text (the year is extracted later by callers).

    Same xpath as getRelease(); kept separate for interface compatibility.
    """
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')
    return str(nodes).strip(" ['']")
def getCover(htmlcode):
    """Return the href of the full-size cover image link ('a.bigImage')."""
    return pq(htmlcode)('a.bigImage').attr('href')
def getRelease(htmlcode):
    """Return the release-date text from the javbus detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')
    return str(nodes).strip(" ['']")
def getRuntime(htmlcode):
    """Return the text node containing '分鐘' (runtime in minutes), or None."""
    return BeautifulSoup(htmlcode, 'lxml').find(text=re.compile('分鐘'))
def getActor(htmlcode):
    """Return the list of actor names from the 'star-name' nodes."""
    soup = BeautifulSoup(htmlcode, 'lxml')
    return [node.get_text() for node in soup.find_all(attrs={'class': 'star-name'})]
def getNum(htmlcode):
    """Return the release id (番号) text from the javbus detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')
    return str(nodes).strip(" ['']")
def getDirector(htmlcode):
    """Return the director name from the javbus detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[4]/a/text()')
    return str(nodes).strip(" ['']")
def getOutline(htmlcode):
    """Return the plot summary paragraph from the DMM product page.

    NOTE(review): the original comment said "actor" but this reads the
    description paragraph.
    """
    return str(pq(htmlcode)('tr td div.mg-b20.lh4 p.mg-b20').text())
def getSerise(htmlcode):
    """Return the series/label text from the javbus detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/a/text()')
    return str(nodes).strip(" ['']")
def getTag(htmlcode):
    """Return the genre tags, skipping decorated nodes that carry an
    'onmouseout' handler (those are UI widgets, not genres)."""
    soup = BeautifulSoup(htmlcode, 'lxml')
    return [node.get_text()
            for node in soup.find_all(attrs={'class': 'genre'})
            if 'onmouseout' not in str(node)]
def main(number):
    """Scrape javbus (plot text from DMM) for *number*; return JSON metadata.

    Any failure falls back to the mgstage scraper (siro.main).
    """
    try:
        htmlcode = get_html('https://www.javbus.com/' + number)
        # DMM keys its content id on the number with the dash removed.
        dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
        dic = {
            'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
            'studio': getStudio(htmlcode),
            # NOTE(review): raises AttributeError when no 4-digit year is
            # found; the outer except then routes to the siro fallback.
            'year': str(re.search('\d{4}', getYear(htmlcode)).group()),
            'outline': getOutline(dww_htmlcode),
            'runtime': getRuntime(htmlcode),
            'director': getDirector(htmlcode),
            'actor': getActor(htmlcode),
            'release': getRelease(htmlcode),
            'number': getNum(htmlcode),
            'cover': getCover(htmlcode),
            'imagecut': 1,
            'tag': getTag(htmlcode),
            'label': getSerise(htmlcode),
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
        # HEYZO releases: rebuild the dict with the raw year string (the
        # strict \d{4} extraction above may not apply); pages are re-fetched.
        if 'HEYZO' in number or 'heyzo' in number or 'Heyzo' in number:
            htmlcode = get_html('https://www.javbus.com/' + number)
            dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
            dic = {
                'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
                'studio': getStudio(htmlcode),
                'year': getYear(htmlcode),
                'outline': getOutline(dww_htmlcode),
                'runtime': getRuntime(htmlcode),
                'director': getDirector(htmlcode),
                'actor': getActor(htmlcode),
                'release': getRelease(htmlcode),
                'number': getNum(htmlcode),
                'cover': getCover(htmlcode),
                'imagecut': 1,
                'tag': getTag(htmlcode),
                'label': getSerise(htmlcode),
            }
            js2 = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4,
                             separators=(',', ':'), ) # .encode('UTF-8')
            return js2
        return js
    except:
        # NOTE(review): bare except — ANY error (network, parse, missing
        # year) silently switches to the mgstage scraper.
        a=siro.main(number)
        return a
def main_uncensored(number):
    """Scrape metadata for an uncensored release; return a JSON string.

    If the first lookup yields no year, retries with '_' in place of '-'
    (some uncensored ids use that spelling on javbus).
    """
    htmlcode = get_html('https://www.javbus.com/' + number)
    dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
    dic = {
        # Strip both the 'xxx-123-' prefix and the 'NUM-' echo from the title.
        'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))).replace(getNum(htmlcode)+'-',''),
        'studio': getStudio(htmlcode),
        'year': getYear(htmlcode),
        'outline': getOutline(dww_htmlcode),
        'runtime': getRuntime(htmlcode),
        'director': getDirector(htmlcode),
        'actor': getActor(htmlcode),
        'release': getRelease(htmlcode),
        'number': getNum(htmlcode),
        'cover': getCover(htmlcode),
        'tag': getTag(htmlcode),
        'label': getSerise(htmlcode),
        'imagecut': 0,  # uncensored covers are used as-is (no crop)
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
    # Empty/'null' year means the first page was a miss; retry with the
    # underscore spelling. The DMM outline is skipped on the retry.
    if getYear(htmlcode) == '' or getYear(htmlcode) == 'null':
        number2 = number.replace('-', '_')
        htmlcode = get_html('https://www.javbus.com/' + number2)
        dic2 = {
            'title': str(re.sub('\w+-\d+-','',getTitle(htmlcode))).replace(getNum(htmlcode)+'-',''),
            'studio': getStudio(htmlcode),
            'year': getYear(htmlcode),
            'outline': '',
            'runtime': getRuntime(htmlcode),
            'director': getDirector(htmlcode),
            'actor': getActor(htmlcode),
            'release': getRelease(htmlcode),
            'number': getNum(htmlcode),
            'cover': getCover(htmlcode),
            'tag': getTag(htmlcode),
            'label':getSerise(htmlcode),
            'imagecut': 0,
        }
        js2 = json.dumps(dic2, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
        return js2
    return js
# def return1():
# json_data=json.loads(main('ipx-292'))
#
# title = str(json_data['title'])
# studio = str(json_data['studio'])
# year = str(json_data['year'])
# outline = str(json_data['outline'])
# runtime = str(json_data['runtime'])
# director = str(json_data['director'])
# actor = str(json_data['actor'])
# release = str(json_data['release'])
# number = str(json_data['number'])
# cover = str(json_data['cover'])
# tag = str(json_data['tag'])
#
# print(title)
# print(studio)
# print(year)
# print(outline)
# print(runtime)
# print(director)
# print(actor)
# print(release)
# print(number)
# print(cover)
# print(tag)
# return1()

6
proxy.ini Normal file
View File

@@ -0,0 +1,6 @@
[proxy]
proxy=127.0.0.1:1080
[Name_Rule]
location_rule='JAV_output/'+actor+'/'+number+'-'+title
naming_rule=number+'-'+title

2
py to exe.bat Normal file
View File

@@ -0,0 +1,2 @@
pyinstaller --onefile AV_Data_Capture.py
pyinstaller --onefile core.py --hidden-import ADC_function.py --hidden-import fc2fans_club.py --hidden-import javbus.py --hidden-import siro.py

View File

@@ -0,0 +1 @@
1

BIN
readme/flow_chart2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

View File

Before

Width:  |  Height:  |  Size: 1.1 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

Before

Width:  |  Height:  |  Size: 3.4 KiB

After

Width:  |  Height:  |  Size: 3.4 KiB

View File

Before

Width:  |  Height:  |  Size: 1.3 KiB

After

Width:  |  Height:  |  Size: 1.3 KiB

View File

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 16 KiB

BIN
readme/readme5.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 457 KiB

BIN
readme/single.gif Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

88
siro.py Normal file
View File

@@ -0,0 +1,88 @@
import re
from lxml import etree
import json
import requests
from bs4 import BeautifulSoup
from ADC_function import *
def getTitle(a):
    """Return the product title from the mgstage detail page HTML."""
    tree = etree.fromstring(a, etree.HTMLParser())
    nodes = tree.xpath('//*[@id="center_column"]/div[2]/h1/text()')
    return str(nodes).strip(" ['']")
def getActor(a):
    """Return the actor cell text (linked text first, then plain text)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table/tr[1]/td[1]/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[1]/td[1]/a/text()')).strip(" ['\\n ']")
    return str(linked + plain).strip('+')
def getStudio(a):
    """Return the maker/studio cell text (linked text first, then plain)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table[2]/tr[2]/td/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
    return str(linked + plain).strip('+')
def getRuntime(a):
    """Return the runtime cell text with the trailing minute marker stripped."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table/tr[3]/td[1]/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[3]/td[1]/a/text()')).strip(" ['\\n ']")
    return str(linked + plain).strip('+').strip('mi')
def getLabel(a):
    """Return the label cell text (linked text first, then plain text)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table/tr[6]/td[1]/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[6]/td[1]/a/text()')).strip(" ['\\n ']")
    return str(linked + plain).strip('+')
def getNum(a):
    """Return the product number cell text (plain text first, then linked)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    linked = str(tree.xpath('//table/tr[2]/td[4]/a/text()')).strip(" ['\\n ']")
    plain = str(tree.xpath('//table/tr[2]/td[4]/text()')).strip(" ['\\n ']")
    return str(plain + linked).strip('+')
def getYear(a):
    """Return the year cell text (linked text first, then plain; no '+' strip)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    linked = str(tree.xpath('//table/tr[2]/td[5]/a/text()')).strip(" ['\\n ']")
    plain = str(tree.xpath('//table/tr[2]/td[5]/text()')).strip(" ['\\n ']")
    return linked + plain
def getRelease(a):
    """Return the release-date cell text (linked text first, then plain)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table/tr[5]/td[1]/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[5]/a/td[1]/text()')).strip(" ['\\n ']")
    return str(linked + plain).strip('+')
def getTag(a):
    """Return the tag cell text (plain text first, then linked text)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    linked = str(tree.xpath('//table/tr[8]/td[1]/a/text()')).strip(" ['\\n ']")
    plain = str(tree.xpath('//table/tr[8]/td[1]/text()')).strip(" ['\\n ']")
    return str(plain + linked).strip('+')
def getCover(htmlcode):
    """Return the cover image src URL from the mgstage detail page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('//*[@id="center_column"]/div[2]/div[1]/div/div/h2/img/@src')
    return str(nodes).strip(" ['']")
def getDirector(a):
    """Return the director cell text (plain text first, then linked text)."""
    tree = etree.fromstring(a, etree.HTMLParser())
    plain = str(tree.xpath('//table/tr[2]/td[1]/text()')).strip(" ['\\n ']")
    linked = str(tree.xpath('//table/tr[2]/td[1]/a/text()')).strip(" ['\\n ']")
    return str(plain + linked).strip('+')
def getOutline(htmlcode):
    """Return the first introduction paragraph from the mgstage page."""
    tree = etree.fromstring(htmlcode, etree.HTMLParser())
    nodes = tree.xpath('//*[@id="introduction"]/dd/p[1]/text()')
    return str(nodes).strip(" ['']")
def main(number):
    """Scrape mgstage for *number* and return the metadata as a JSON string."""
    # The 'adc' cookie acknowledges the site's adult-content gate.
    htmlcode=get_html('https://www.mgstage.com/product/product_detail/'+str(number),cookies={'adc':'1'})
    soup = BeautifulSoup(htmlcode, 'lxml')
    # Flatten the detail table so the tr/td xpaths in the helpers match.
    a = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ','')
    #print(a)
    dic = {
        'title': getTitle(htmlcode).replace("\\n",'').replace(' ',''),
        'studio': getStudio(a),
        # NOTE(review): raises AttributeError when the release string holds
        # no 4-digit year; callers (javbus.main) catch this.
        'year': str(re.search('\d{4}',getRelease(a)).group()),
        'outline': getOutline(htmlcode),
        'runtime': getRuntime(a),
        'director': getDirector(a),
        'actor': getActor(a),
        'release': getRelease(a),
        'number': number,
        'cover': getCover(htmlcode),
        'imagecut': 0,
        'tag': getTag(a).replace("'\\n',",'').replace(' ', '').replace("\\n','\\n",','),
        'label':getLabel(a)
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)#.encode('UTF-8')
    #print('https://www.mgstage.com/product/product_detail/'+str(number))
    return js

5
update_check.json Normal file
View File

@@ -0,0 +1,5 @@
{
"version": "0.10.4",
"version_show":"Beta 10.4",
"download": "https://github.com/wenead99/AV_Data_Capture/releases"
}