19 Commits
0.11.8 ... 1.1

Author SHA1 Message Date
Yoshiko  9898932f09  Update update_check.json  2019-08-18 22:40:37 +08:00
Yoshiko  c4fc22054b  Update 1.1  2019-08-18 22:40:11 +08:00
Yoshiko  449e900837  Update README.md  2019-08-18 14:52:02 +08:00
Yoshiko  e3ebbec947  Update README.md  2019-08-14 21:56:25 +08:00
Yoshiko  65a9521ab1  Update README.md  2019-08-14 21:55:51 +08:00
Yoshiko  b79a600c0d  Update README.md  2019-08-14 19:29:18 +08:00
Yoshiko  30d33fe8f7  Update README.md  2019-08-14 11:50:42 +08:00
Yoshiko  b325fc1f01  Update README.md  2019-08-14 11:49:00 +08:00
Yoshiko  954fb02c0c  Update README.md  2019-08-14 00:28:39 +08:00
Yoshiko  5ee398d6b5  Update Beta 11.9  2019-08-12 01:23:57 +08:00
Yoshiko  b754c11814  Update update_check.json  2019-08-12 01:21:46 +08:00
Yoshiko  5d19ae594d  Update Beta 11.9  2019-08-12 01:21:34 +08:00
Yoshiko  bfa8ed3144  Update README.md  2019-08-11 00:41:01 +08:00
Yoshiko  0ec23aaa38  Update README.md  2019-08-11 00:39:31 +08:00
Yoshiko  878ae46d77  Update README.md  2019-08-11 00:31:24 +08:00
Yoshiko  766e6bbd88  Update update_check.json  2019-08-11 00:29:15 +08:00
Yoshiko  0107c7d624  Update README.md  2019-08-11 00:28:25 +08:00
Yoshiko  d0cf2d2193  Update README.md  2019-08-11 00:27:36 +08:00
Yoshiko  d1403af548  Update README.md  2019-08-10 22:28:35 +08:00
11 changed files with 334 additions and 139 deletions

View File

@@ -19,6 +19,7 @@ else:
print('[+]config.ini: not found, creating...')
with open("config.ini", "wt", encoding='UTF-8') as code:
    print("[common]", file=code)
+    print("main_mode=1", file=code)
    print("failed_output_folder=failed", file=code)
    print("success_output_folder=JAV_output", file=code)
    print("", file=code)

View File

@@ -14,7 +14,7 @@ os.chdir(os.getcwd())
# ============global var===========
-version='0.11.8'
+version='1.1'
config = ConfigParser()
config.read(config_file, encoding='UTF-8')
@@ -25,11 +25,11 @@ Platform = sys.platform
def UpdateCheck():
    if UpdateCheckSwitch() == '1':
-        html2 = get_html('https://raw.githubusercontent.com/wenead99/AV_Data_Capture/master/update_check.json')
+        html2 = get_html('https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/update_check.json')
        html = json.loads(str(html2))
        if not version == html['version']:
            print('[*] * New update ' + html['version'] + ' *')
            print('[*] * Download *')
            print('[*] ' + html['download'])
            print('[*]=====================================')
@@ -37,15 +37,35 @@ def UpdateCheck():
        print('[+]Update Check disabled!')
def movie_lists():
    directory = config['directory_capture']['directory']
-    a2 = glob.glob(r"./" + directory + "/*.mp4")
-    b2 = glob.glob(r"./" + directory + "/*.avi")
-    c2 = glob.glob(r"./" + directory + "/*.rmvb")
-    d2 = glob.glob(r"./" + directory + "/*.wmv")
-    e2 = glob.glob(r"./" + directory + "/*.mov")
-    f2 = glob.glob(r"./" + directory + "/*.mkv")
-    g2 = glob.glob(r"./" + directory + "/*.flv")
-    h2 = glob.glob(r"./" + directory + "/*.ts")
-    total = a2 + b2 + c2 + d2 + e2 + f2 + g2 + h2
+    mp4=[]
+    avi=[]
+    rmvb=[]
+    wmv=[]
+    mov=[]
+    mkv=[]
+    flv=[]
+    ts=[]
+    if directory=='*':
+        for i in os.listdir(os.getcwd()):
+            mp4 += glob.glob(r"./" + i + "/*.mp4")
+            avi += glob.glob(r"./" + i + "/*.avi")
+            rmvb += glob.glob(r"./" + i + "/*.rmvb")
+            wmv += glob.glob(r"./" + i + "/*.wmv")
+            mov += glob.glob(r"./" + i + "/*.mov")
+            mkv += glob.glob(r"./" + i + "/*.mkv")
+            flv += glob.glob(r"./" + i + "/*.flv")
+            ts += glob.glob(r"./" + i + "/*.ts")
+        total = mp4 + avi + rmvb + wmv + mov + mkv + flv + ts
+        return total
+    mp4 = glob.glob(r"./" + directory + "/*.mp4")
+    avi = glob.glob(r"./" + directory + "/*.avi")
+    rmvb = glob.glob(r"./" + directory + "/*.rmvb")
+    wmv = glob.glob(r"./" + directory + "/*.wmv")
+    mov = glob.glob(r"./" + directory + "/*.mov")
+    mkv = glob.glob(r"./" + directory + "/*.mkv")
+    flv = glob.glob(r"./" + directory + "/*.flv")
+    ts = glob.glob(r"./" + directory + "/*.ts")
+    total = mp4 + avi + rmvb + wmv + mov + mkv + flv + ts
    return total
def CreatFailedFolder():
    if not os.path.exists('failed/'):  # create the "failed" folder
@@ -74,31 +94,30 @@ def rreplace(self, old, new, *max):
    return new.join(self.rsplit(old, count))
def getNumber(filepath):
-    try:  # normal ID extraction, mainly for IDs that contain a hyphen
-        filepath1 = filepath.replace("_", "-")
-        filepath1.strip('22-sht.me').strip('-HD').strip('-hd')
-        filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath1))  # strip the date from the file name
-        file_number = re.search('\w+-\d+', filename).group()
-        return file_number
+    try:
+        try:
+            filepath1 = filepath.replace("_", "-")
+            filepath1.strip('22-sht.me').strip('-HD').strip('-hd')
+            filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath1))  # strip the date from the file name
+            file_number = re.search('\w+-\d+', filename).group()
+            return file_number
+        except:
+            filepath1 = filepath.replace("_", "-")
+            filepath1.strip('22-sht.me').strip('-HD').strip('-hd')
+            filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath1))  # strip the date from the file name
+            file_number = re.search('\w+-\w+', filename).group()
+            return file_number
    except:  # extract IDs that do not contain a hyphen
-        try:  # extract Tokyo-Hot style IDs such as n1087
-            filename1 = str(re.sub("h26\d", "", filepath)).strip('Tokyo-hot').strip('tokyo-hot')
-            filename0 = str(re.sub(".*?\.com-\d+", "", filename1)).strip('_')
-            if '-C.' in filepath or '-c.' in filepath:
-                cn_sub = '1'
-            file_number = str(re.search('n\d{4}', filename0).group(0))
-            return file_number
+        try:
+            filename1 = str(re.sub("ts6\d", "", filepath)).strip('Tokyo-hot').strip('tokyo-hot')
+            filename0 = str(re.sub(".*?\.com-\d+", "", filename1)).strip('_')
+            file_number = str(re.search('\w+\d{4}', filename0).group(0))
+            return file_number
        except:  # extract IDs with no hyphen at all
-            filename1 = str(re.sub("h26\d", "", filepath))  # strip h264/265
-            filename0 = str(re.sub(".*?\.com-\d+", "", filename1))
-            file_number2 = str(re.match('\w+', filename0).group())
-            if '-C.' in filepath or '-c.' in filepath:
-                cn_sub = '1'
-            file_number = str(file_number2.replace(re.match("^[A-Za-z]+", file_number2).group(),
-                                                   re.match("^[A-Za-z]+", file_number2).group() + '-'))
-            return file_number
-            # if not re.search('\w-', file_number).group() == 'None':
-            #     file_number = re.search('\w+-\w+', filename).group()
-            #
+            filename1 = str(re.sub("ts6\d", "", filepath))  # strip ts64/265
+            filename0 = str(re.sub(".*?\.com-\d+", "", filename1))
+            file_number2 = str(re.match('\w+', filename0).group())
+            file_number = str(file_number2.replace(re.match("^[A-Za-z]+", file_number2).group(),re.match("^[A-Za-z]+", file_number2).group() + '-'))
+            return file_number
def RunCore():
    if Platform == 'win32':
def RunCore(): def RunCore():
if Platform == 'win32': if Platform == 'win32':
@@ -118,7 +137,7 @@ def RunCore():
if __name__ =='__main__':
    print('[*]===========AV Data Capture===========')
    print('[*] Version '+version)
    print('[*]=====================================')
    CreatFailedFolder()
    UpdateCheck()
@@ -139,6 +158,7 @@ if __name__ =='__main__':
            print('[-]' + i + ' Cannot catch the number :')
            print('[-]Move ' + i + ' to failed folder')
            shutil.move(i, str(os.getcwd()) + '/' + 'failed/')
+            continue
    CEF('JAV_output')

README.md
View File

@@ -5,7 +5,8 @@
![](https://img.shields.io/badge/build-passing-brightgreen.svg?style=flat-square)
![](https://img.shields.io/github/downloads/yoshiko2/av_data_capture/total.svg?style=flat-square)<br>
![](https://img.shields.io/github/license/yoshiko2/av_data_capture.svg?style=flat-square)
-![](https://img.shields.io/github/release/yoshiko2/av_data_capture.svg?style=flat-square)<br>
+![](https://img.shields.io/github/release/yoshiko2/av_data_capture.svg?style=flat-square)
+![](https://img.shields.io/badge/Python-3.7-yellow.svg?style=flat-square&logo=python)<br>
**Japanese movie metadata scraper.** Used together with local media managers such as EMBY and KODI: the tool classifies movies and fetches their metadata, then uses that metadata to sort and organize the local collection.
@@ -20,10 +21,13 @@
* [Quick guide](#简要教程)
* [Module installation](#1请安装模块在cmd终端逐条输入以下命令安装)
* [Configuration](#2配置configini)
-* [Run the program](#4运行-av_data_capturepyexe)
-* [Troubleshooting (important)](#5异常处理重要)
-* [Import into your media library](#7把jav_output文件夹导入到embykodi中根据封面选片子享受手冲乐趣)
-* [Closing words](#8写在后面)
+* [(Optional) Custom directory and renaming rules](#3可选设置自定义目录和影片重命名规则)
+* [Run the program](#5运行-av_data_capturepyexe)
+* [Handling the original movie path](#4建议把软件拷贝和电影的统一目录下)
+* [Troubleshooting (important)](#51异常处理重要)
+* [Import into your media library](#7把jav_output文件夹导入到embykodi中等待元数据刷新完成)
+* [About Synology NAS](#8关于群晖NAS)
+* [Closing words](#9写在后面)
# Disclaimer
1. This software is for **technical and academic exchange** only; the project is intended for learning Python 3<br>
@@ -52,7 +56,7 @@
# How to use
### Download
-* The release build runs without a **Python environment**, so you can skip [module installation](#1请安装模块在cmd终端逐条输入以下命令安装)<br>Release download (**Windows only**):<br>[![](https://img.shields.io/badge/%E4%B8%8B%E8%BD%BD-windows-blue.svg?style=for-the-badge&logo=windows)](https://github.com/yoshiko2/AV_Data_Capture/releases/download/0.11.6/Beta11.6.zip)<br>
+* The release build runs without a **Python environment**, so you can skip [module installation](#1请安装模块在cmd终端逐条输入以下命令安装)<br>Release download (**Windows only**):<br>[![](https://img.shields.io/badge/%E4%B8%8B%E8%BD%BD-windows-blue.svg?style=for-the-badge&logo=windows)](https://github.com/yoshiko2/AV_Data_Capture/releases)<br>
* On Linux and macOS, download and run the source package
* Windows Python environment: [go here](https://www.python.org/downloads/windows/) and download the executable installer
@@ -85,13 +89,18 @@ pip install pillow
## 2. Configure config.ini
config.ini
+>[common]<br>
+>main_mode=1<br>
+>failed_output_folder=failed<br>
+>success_output_folder=JAV_output<br>
+>
>[proxy]<br>
>proxy=127.0.0.1:1080<br>
>timeout=10<br>
>retry=3<br>
>
>[Name_Rule]<br>
->location_rule='JAV_output/'+actor+'/['+number+']-'+title<br>
+>location_rule=actor+'/'+number<br>
>naming_rule=number+'-'+title<br>
>
>[update]<br>
@@ -102,11 +111,23 @@ config.ini
>#emby or plex<br>
>
>[directory_capture]<br>
->input_directory=<br>
+>directory=<br>
->
->#everyone switch:1=on, 0=off<br>
-### 1. Network settings
+### Global settings
+---
+#### Program mode
+>[common]<br>
+>main_mode=1<br>
+1 is normal mode; 2 is organize-only mode, which simply renames each movie to its ID number and sorts it into a folder named after the actress
+>failed_output_folder=failed<br>
+>success_output_folder=JAV_output<br>
+Set the output folders for successful and failed captures
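For orientation, a minimal sketch of how this setting is consumed, based on the `core.py` hunk further down (the printed messages are illustrative, not the program's actual output):

```python
from configparser import ConfigParser

config = ConfigParser()
config.read("config.ini", encoding="UTF-8")

# core.py reads [common]/main_mode and branches on it:
# '1' = full scrape (fetch metadata and covers, then sort),
# '2' = organize-only (rename to the ID number and sort by actress).
program_mode = config["common"]["main_mode"]
if program_mode == "1":
    print("normal mode: scrape metadata, then organize")
elif program_mode == "2":
    print("organize-only mode: rename to ID and sort by actress")
```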
+---
+### Network settings
#### * Proxy settings for "certain regions"
Open ```config.ini``` and set your local proxy address and port on the ```proxy``` line under ```[proxy]```; Shadowxxxx/X and V2XXX local proxy ports are supported:<br>
Example: ```proxy=127.0.0.1:1080```<br>A Japanese proxy is recommended when scraping the amateur (素人) series<br>
@@ -118,37 +139,44 @@ config.ini
>timeout=10<br>
10 is the timeout before retrying, in seconds
+---
#### Connection retry count
>[proxy]<br>
>retry=3<br>
3 is the number of retries
+---
#### Update check switch
>[update]<br>
>update_check=1<br>
-0 turns it off, 1 turns it on; turning it off is not recommended
-For PLEX, install the ```XBMCnfoMoviesImporter``` plugin
+0 turns it off, 1 turns it on; turning it off is not recommended
+---
##### Media library selection
>[media]<br>
>media_warehouse=emby<br>
>#emby or plex<br>
->#plex only test!<br>
-emby is recommended; plex support is not complete
-#### Output directory
->[directory_capture]<br>
->location_rule='JAV_output/'+actor+'/['+number+']-'+title<br>
-The leading JAV_output is the output directory; when customizing, do not remove the surrounding quotes ''
+You can choose emby or plex<br>
+If you use PLEX, install the ```XBMCnfoMoviesImporter``` plugin
+---
#### Capture directory
>[directory_capture]<br>
->input_directory=<br>
+>directory=<br>
-If input_directory is left empty, movies in the same directory as the program are captured
+If directory is left empty, movies in the same directory as the program are captured; set it to ``` * ``` to capture movies in every subdirectory under the program's directory
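A rough sketch of the lookup behind this option, condensed from the `movie_lists()` change shown earlier (only the `.mp4` pattern is kept; the real function also globs .avi/.rmvb/.wmv/.mov/.mkv/.flv/.ts):

```python
import glob
import os

def list_movies(directory):
    # Condensed illustration of movie_lists() in AV_Data_capture.py.
    if directory == '*':
        # '*' means: scan every subdirectory under the working directory.
        found = []
        for sub in os.listdir(os.getcwd()):
            found += glob.glob("./" + sub + "/*.mp4")
        return found
    # Otherwise scan only the configured directory ('' = same directory as the program).
    return glob.glob("./" + directory + "/*.mp4")
```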
-### (Optional) Set custom directory and movie renaming rules
-**Defaults are already provided**<br>
-##### Naming parameters<br>
+## 3. (Optional) Set custom directory and movie renaming rules
+>[Name_Rule]<br>
+>location_rule=actor+'/'+number<br>
+>naming_rule=number+'-'+title<br>
+Defaults are already provided
+---
+#### Naming parameters
>title = movie title<br>
>actor = actor<br>
>studio = studio<br>
@@ -160,28 +188,46 @@ PLEX请安装插件```XBMCnfoMoviesImporter```
>tag = genre<br>
>outline = synopsis<br>
>runtime = runtime<br>
-##### **Example**:<br>
-Directory structure rule: ```location_rule='JAV_output/'+actor+'/'+number```<br> **Adding title here is not recommended**: titles can be very long, and a Windows API limitation makes creating the folder prone to failure while data is being captured.<br>
-Movie naming rule: ```naming_rule='['+number+']-'+title```<br>
-### 3. Update switch
+The parameters above are referred to below as **variables**
+**The title shown in EMBY, KODI and other local media libraries is not affected by how the movie file itself is named** under the directory structure; the file name stays ID number + extension.
+#### Example:
+How to write a custom rule: there are two kinds of elements, variables and literal strings, and every pair of elements must be joined with a plus sign **+**, e.g. ```'naming_rule=['+number+']-'+title```, where text inside quotes '' is a literal string and text without quotes is a variable<br>
+Directory structure rule: default ```location_rule=actor+'/'+number```<br> **Adding title here is not recommended**: titles can be very long, and a Windows API limitation makes creating the folder prone to failure while data is being captured.<br>
+Movie naming rule: default ```naming_rule=number+'-'+title```<br> **The title shown in EMBY, KODI and other local media libraries is not affected by how the movie file itself is named**; the file name stays ID number + extension.
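To make the variable-plus-string idea concrete, a minimal sketch of how the two rules end up being applied; the real `core.py` calls `eval()` on the rule strings from `config.ini`, and the metadata values here are made up for illustration:

```python
# Hypothetical metadata for one movie.
actor = "Some Actress"
number = "ABC-123"
title = "Some Title"

location_rule = "actor+'/'+number"    # value of [Name_Rule]/location_rule
naming_rule = "number+'-'+title"      # value of [Name_Rule]/naming_rule

# Variables and quoted literals are joined with '+' exactly as written
# in config.ini, then evaluated as a Python expression.
folder = eval(location_rule)     # -> "Some Actress/ABC-123"
filename = eval(naming_rule)     # -> "ABC-123-Some Title"
print(folder)
print(filename)
```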
+---
+### Update switch
>[update]<br>update_check=1<br>
1 is on, 0 is off
-## 3. Copy the program into the same directory as your movies
-## 4. Run ```AV_Data_capture.py/.exe```
+## 4. It is recommended to copy the program into the same directory as your movies
+(this applies when ```directory=``` in ```config.ini``` is left empty)
+## 5. Run ```AV_Data_capture.py/.exe```
When the file name contains:<br>
Chinese characters, 字幕, -c. or -C., a **Chinese subtitles** tag is added while the metadata is processed
-## 5. Troubleshooting (important)
+## 5.1 Troubleshooting (important)
### Make sure the program is complete and that the contents of your ini file match the ini file shipped with the download
+---
### The program crashes right after opening
Open a cmd prompt, drag ```AV_Data_capture.py/.exe``` into the window and press Enter to run it, read the error message that appears, then **fix it using the items below**
+---
### Errors related to ```Updata_check``` and ```JSON```
-See [Network settings](#1网络设置)
+See [Network settings](#网络设置)
+---
### About ```FileNotFoundError: [WinError 3] 系统找不到指定的路径。: 'JAV_output''```
Create a JAV_output folder in the program's directory; most likely you did not put the program in the same directory as the movies
+---
### Connection-refused errors
-Set up the [proxy](#1针对某些地区的代理设置) correctly<br>
+Set up the [proxy](#针对某些地区的代理设置) correctly<br>
+---
### Nonetype / xpath errors
Same as above<br>
+---
### ID number extraction fails or misbehaves
**Movies whose metadata can currently be extracted: titles that have metadata on JAVBUS; the amateur series 300Maan, 259luxu, siro, etc.; the FC2 series**<br>
>The next image is from Pockies' blog, used with the original author's permission<br>
@@ -198,13 +244,17 @@ COSQ-004.mp4
**Irregular ID numbers**: works such as ```XXX-XXX-1``` or ```1301XX-MINA_YUKA```, whose irregular IDs do exist in databases like javbus.<br>**Important**: the exception is **movie file names** like ```XXXX-XXX-C```, where the trailing -C means the movie has Chinese subtitles<br>
Requirement: the file name must contain an underscore or hyphen "_" / "-"; ideally it contains nothing but the ID number, which helps the program fetch the metadata
To rename many movies at once you can batch-rename them with [ReNamer](http://www.den4b.com/products/renamer)<br>
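As a rough illustration of why the underscore/hyphen matters, here is the core pattern used by `getNumber()` in the hunk above, reduced to the happy path (the helper name `extract_number` is made up):

```python
import re

def extract_number(filename):
    # Simplified from getNumber(): underscores are normalized to hyphens,
    # then the first "letters-digits" group is taken as the ID number.
    name = filename.replace("_", "-")
    match = re.search(r"\w+-\d+", name)
    return match.group() if match else None

print(extract_number("ABC-123.mp4"))       # ABC-123
print(extract_number("ABC_123-C.mkv"))     # ABC-123 (the -C marks Chinese subtitles)
print(extract_number("random clip.mp4"))   # None -> the file ends up in the failed folder
```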
+---
### About PIL/image.py
No fix for now; it may be a network problem or a pillow packaging problem. You can run from source instead (install the modules from step 1 first)
## 6. The program automatically moves movies whose metadata was fetched successfully into the JAV_output folder, sorted by actor, and moves failures into the failed folder.
## 7. Import the JAV_output folder into EMBY or KODI and wait for the metadata refresh to finish
-## 8. Closing words
+## 8. About Synology NAS
+Enable SMB and mount the share as a network drive on Windows, and the program can be used; the same applies to other NAS devices
+## 9. Closing words
How does it feel to see your Japanese movie collection managed this neatly? Quite the sense of achievement, right?<br>
**Official Telegram group: [click to join](https://t.me/AV_Data_Capture_Official)**<br>

avsox.py (new file)
View File

@@ -0,0 +1,112 @@
import re
from lxml import etree
import json
from bs4 import BeautifulSoup
from ADC_function import *
def getActorPhoto(htmlcode): #//*[@id="star_qdt"]/li/a/img
soup = BeautifulSoup(htmlcode, 'lxml')
a = soup.find_all(attrs={'class': 'avatar-box'})
d = {}
for i in a:
l = i.img['src']
t = i.span.get_text()
p2 = {t: l}
d.update(p2)
return d
def getTitle(a):
try:
html = etree.fromstring(a, etree.HTMLParser())
result = str(html.xpath('/html/body/div[2]/h3/text()')).strip(" ['']") #[0]
return result.replace('/', '')
except:
return ''
def getActor(a): #//*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
soup = BeautifulSoup(a, 'lxml')
a = soup.find_all(attrs={'class': 'avatar-box'})
d = []
for i in a:
d.append(i.span.get_text())
return d
def getStudio(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//p[contains(text(),"制作商: ")]/following-sibling::p[1]/a/text()')).strip(" ['']").replace("', '",' ')
return result1
def getRuntime(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//span[contains(text(),"长度:")]/../text()')).strip(" ['分钟']")
return result1
def getLabel(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//p[contains(text(),"系列:")]/following-sibling::p[1]/a/text()')).strip(" ['']")
return result1
def getNum(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//span[contains(text(),"识别码:")]/../span[2]/text()')).strip(" ['']")
return result1
def getYear(release):
try:
result = str(re.search('\d{4}',release).group())
return result
except:
return release
def getRelease(a):
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//span[contains(text(),"发行时间:")]/../text()')).strip(" ['']")
return result1
def getCover(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = str(html.xpath('/html/body/div[2]/div[1]/div[1]/a/img/@src')).strip(" ['']")
return result
def getCover_small(htmlcode):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = str(html.xpath('//*[@id="waterfall"]/div/a/div[1]/img/@src')).strip(" ['']")
return result
def getTag(a): # 获取演员
soup = BeautifulSoup(a, 'lxml')
a = soup.find_all(attrs={'class': 'genre'})
d = []
for i in a:
d.append(i.get_text())
return d
def main(number):
a = get_html('https://avsox.asia/cn/search/' + number)
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
if result1 == '' or result1 == 'null' or result1 == 'None':
a = get_html('https://avsox.asia/cn/search/' + number.replace('-', '_'))
print(a)
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
if result1 == '' or result1 == 'null' or result1 == 'None':
a = get_html('https://avsox.asia/cn/search/' + number.replace('_', ''))
print(a)
html = etree.fromstring(a, etree.HTMLParser()) # //table/tr[1]/td[1]/text()
result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
web = get_html(result1)
soup = BeautifulSoup(web, 'lxml')
info = str(soup.find(attrs={'class': 'row movie'}))
dic = {
'actor': getActor(web),
'title': getTitle(web).strip(getNum(web)),
'studio': getStudio(info),
'outline': '',#
'runtime': getRuntime(info),
'director': '', #
'release': getRelease(info),
'number': getNum(info),
'cover': getCover(web),
'cover_small': getCover_small(a),
'imagecut': 3,
'tag': getTag(web),
'label': getLabel(info),
'year': getYear(getRelease(info)), # str(re.search('\d{4}',getRelease(a)).group()),
'actor_photo': getActorPhoto(web),
'website': result1,
'source': 'avsox.py',
}
js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), ) # .encode('UTF-8')
return js
#print(main('041516_541'))
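A minimal usage sketch for the new scraper (hypothetical ID; it needs network access plus the project's `get_html` from `ADC_function`): `main()` returns a JSON string, which callers such as `core.py` decode with `json.loads`:

```python
import json
import avsox

# main() returns the scraped metadata as a JSON string.
data = json.loads(avsox.main('041516_541'))
print(data['number'], data['title'])
print(data['cover'])        # full-size cover URL
print(data['cover_small'])  # thumbnail; used because avsox sets imagecut to 3
```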

View File

@@ -1,10 +1,15 @@
+[common]
+main_mode=1
+failed_output_folder=failed
+success_output_folder=JAV_output
[proxy]
proxy=127.0.0.1:1080
timeout=10
retry=3
[Name_Rule]
-location_rule='JAV_output/'+actor+'/'+number
+location_rule=actor+'/'+number
naming_rule=number+'-'+title
[update]
@@ -13,10 +18,6 @@ update_check=1
[media]
media_warehouse=emby
#emby or plex
-#plex only test!
[directory_capture]
-switch=0
directory=
-#everyone switch:1=on, 0=off

core.py
View File

@@ -6,14 +6,17 @@ import os.path
import shutil
from PIL import Image
import time
-import javbus
import json
-import fc2fans_club
-import siro
from ADC_function import *
from configparser import ConfigParser
import argparse
+#=========website========
+import fc2fans_club
+import siro
+import avsox
+import javbus
import javdb
+#=========website========
Config = ConfigParser()
Config.read(config_file, encoding='UTF-8')
@@ -42,6 +45,7 @@ houzhui=''
website=''
json_data={}
actor_photo={}
+cover_small=''
naming_rule =''#eval(config['Name_Rule']['naming_rule'])
location_rule=''#eval(config['Name_Rule']['location_rule'])
program_mode = Config['common']['main_mode']
@@ -66,6 +70,11 @@ def CreatFailedFolder():
    except:
        print("[-]failed!can not be make Failed output folder\n[-](Please run as Administrator)")
        os._exit(0)
+def getDataState(json_data):  # detect a failed metadata fetch
+    if json_data['title'] == '' or json_data['title'] == 'None' or json_data['title'] == 'null':
+        return 0
+    else:
+        return 1
def getDataFromJSON(file_number):  # return the metadata from JSON
    global title
    global studio
@@ -84,6 +93,7 @@ def getDataFromJSON(file_number): #从JSON返回元数据
    global cn_sub
    global website
    global actor_photo
+    global cover_small
    global naming_rule
    global location_rule
@@ -92,20 +102,33 @@ def getDataFromJSON(file_number): #从JSON返回元数据
    # ================================================ website rules: start ================================================
    try:  # rules that need a regular expression
-        # =======================javdb.py=======================
        if re.search('^\d{5,}', file_number).group() in file_number:
-            json_data = json.loads(javbus.main_uncensored(file_number))
+            json_data = json.loads(avsox.main(file_number))
+            if getDataState(json_data) == 0:  # if the metadata fetch failed, try the ID on another site
+                json_data = json.loads(javdb.main(file_number))
+        elif re.search('\d+\D+', file_number).group() in file_number:
+            json_data = json.loads(siro.main(file_number))
+            if getDataState(json_data) == 0:  # if the metadata fetch failed, try the ID on another site
+                json_data = json.loads(javbus.main(file_number))
+        elif getDataState(json_data) == 0:  # if the metadata fetch failed, try the ID on another site
+            json_data = json.loads(javdb.main(file_number))
    except:  # rules that do not need a regular expression
-        # ====================fc2fans_club.py====================
        if 'fc2' in file_number:
            json_data = json.loads(fc2fans_club.main(file_number.strip('fc2_').strip('fc2-').strip('ppv-').strip('PPV-').strip('FC2_').strip('FC2-').strip('ppv-').strip('PPV-')))
        elif 'FC2' in file_number:
            json_data = json.loads(fc2fans_club.main(file_number.strip('FC2_').strip('FC2-').strip('ppv-').strip('PPV-').strip('fc2_').strip('fc2-').strip('ppv-').strip('PPV-')))
-        elif 'siro' in number or 'SIRO' in number or 'Siro' in number:
-            json_data = json_data(siro.main(file_number))
-        # =======================javbus.py=======================
+        elif 'HEYZO' in number or 'heyzo' in number or 'Heyzo' in number:
+            json_data = json.loads(avsox.main(file_number))
+        elif 'siro' in file_number or 'SIRO' in file_number or 'Siro' in file_number:
+            json_data = json.loads(siro.main(file_number))
        else:
            json_data = json.loads(javbus.main(file_number))
+            if getDataState(json_data) == 0:  # if the metadata fetch failed, try the ID on another site
+                json_data = json.loads(avsox.main(file_number))
+            elif getDataState(json_data) == 0:  # if the metadata fetch failed, try the ID on another site
+                json_data = json.loads(javdb.main(file_number))
    # ================================================ website rules: end ================================================
@@ -119,16 +142,25 @@ def getDataFromJSON(file_number): #从JSON返回元数据
    release = json_data['release']
    number = json_data['number']
    cover = json_data['cover']
+    try:
+        cover_small = json_data['cover_small']
+    except:
+        aaaaaaa=''
    imagecut = json_data['imagecut']
    tag = str(json_data['tag']).strip("[ ]").replace("'", '').replace(" ", '').split(',')  # convert the string to a list
    actor = str(actor_list).strip("[ ]").replace("'", '').replace(" ", '')
    actor_photo = json_data['actor_photo']
    website = json_data['website']
+    source = json_data['source']
    if title == '' or number == '':
        print('[-]Movie Data not found!')
        moveFailedFolder()
+    if imagecut == '3':
+        DownloadFileWithFilename()
    # ==================== handle illegal characters ======================  #\/:*?"<>|
    if '\\' in title:
        title=title.replace('\\', ' ')
@@ -152,6 +184,23 @@ def getDataFromJSON(file_number): #从JSON返回元数据
    naming_rule = eval(config['Name_Rule']['naming_rule'])
    location_rule = eval(config['Name_Rule']['location_rule'])
+def smallCoverCheck():
+    if imagecut == 3:
+        if option == 'emby':
+            DownloadFileWithFilename(cover_small, '1.jpg', path)
+            img = Image.open(path + '/1.jpg')
+            w = img.width
+            h = img.height
+            img.save(path + '/' + number + '.png')
+            time.sleep(1)
+            os.remove(path + '/1.jpg')
+        if option == 'plex':
+            DownloadFileWithFilename(cover_small, '1.jpg', path)
+            img = Image.open(path + '/1.jpg')
+            w = img.width
+            h = img.height
+            img.save(path + '/poster.png')
+            os.remove(path + '/1.jpg')
def creatFolder():  # create the folder
    global actor
    global path
@@ -351,7 +400,7 @@ def cutImage():
            img2.save(path + '/poster.png')
        except:
            print('[-]Cover cut failed!')
-    else:
+    elif imagecut == 0:
        img = Image.open(path + '/fanart.jpg')
        w = img.width
        h = img.height
@@ -367,7 +416,7 @@ def cutImage():
            img2.save(path + '/' + number + '.png')
        except:
            print('[-]Cover cut failed!')
-    else:
+    elif imagecut == 0:
        img = Image.open(path + '/' + number + '.jpg')
        w = img.width
        h = img.height
@@ -419,6 +468,7 @@ if __name__ == '__main__':
    if program_mode == '1':
        imageDownload(filepath)  # creatFolder returns the path for this ID
        PrintFiles(filepath)  # write the info files
+        smallCoverCheck()
        cutImage()  # crop the cover image
        pasteFileToFolder(filepath, path)  # move the file
        renameJpgToBackdrop_copy()

View File

@@ -75,6 +75,7 @@ def main(number2):
        'tag': getTag(htmlcode),
        'actor_photo':'',
        'website': 'http://fc2fans.club/html/FC2-' + number + '.html',
+        'source': 'fc2fans_club.py',
    }
    #print(getTitle(htmlcode))
    #print(getNum(htmlcode))

View File

@@ -1,17 +1,9 @@
import re
-import requests #need install
from pyquery import PyQuery as pq#need install
from lxml import etree#need install
-import os
-import os.path
-import shutil
from bs4 import BeautifulSoup#need install
-from PIL import Image#need install
-import time
import json
from ADC_function import *
-import javdb
-import siro
def getActorPhoto(htmlcode): #//*[@id="star_qdt"]/li/a/img
    soup = BeautifulSoup(htmlcode, 'lxml')
@@ -88,16 +80,12 @@ def getTag(htmlcode): # 获取演员
def main(number):
-    try:
-        if re.search('\d+\D+', number).group() in number or 'siro' in number or 'SIRO' in number or 'Siro' in number:
-            js = siro.main(number)
-            return js
-    except:
-        aaaa=''
    try:
        htmlcode = get_html('https://www.javbus.com/' + number)
-        dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
+        try:
+            dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
+        except:
+            dww_htmlcode = ''
        dic = {
            'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
            'studio': getStudio(htmlcode),
@@ -114,35 +102,12 @@ def main(number):
            'label': getSerise(htmlcode),
            'actor_photo': getActorPhoto(htmlcode),
            'website': 'https://www.javbus.com/' + number,
+            'source' : 'javbus.py',
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
-        if 'HEYZO' in number or 'heyzo' in number or 'Heyzo' in number:
-            htmlcode = get_html('https://www.javbus.com/' + number)
-            #dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
-            dic = {
-                'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
-                'studio': getStudio(htmlcode),
-                'year': getYear(htmlcode),
-                'outline': '',
-                'runtime': getRuntime(htmlcode),
-                'director': getDirector(htmlcode),
-                'actor': getActor(htmlcode),
-                'release': getRelease(htmlcode),
-                'number': getNum(htmlcode),
-                'cover': getCover(htmlcode),
-                'imagecut': 1,
-                'tag': getTag(htmlcode),
-                'label': getSerise(htmlcode),
-                'actor_photo': getActorPhoto(htmlcode),
-                'website': 'https://www.javbus.com/' + number,
-            }
-            js2 = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4,
-                             separators=(',', ':'), )  # .encode('UTF-8')
-            return js2
        return js
    except:
-        a=javdb.main(number)
-        return a
+        return main_uncensored(number)
def main_uncensored(number):
    htmlcode = get_html('https://www.javbus.com/' + number)
@@ -166,11 +131,7 @@ def main_uncensored(number):
        'imagecut': 0,
        'actor_photo': '',
        'website': 'https://www.javbus.com/' + number,
+        'source': 'javbus.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
-    if getYear(htmlcode) == '' or getYear(htmlcode) == 'null':
-        js2 = javdb.main(number)
-        return js2
    return js

View File

@@ -1,7 +1,6 @@
import re
from lxml import etree
import json
-import requests
from bs4 import BeautifulSoup
from ADC_function import *
@@ -79,7 +78,6 @@ def main(number):
    result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
    b = get_html('https://javdb1.com' + result1)
    soup = BeautifulSoup(b, 'lxml')
    a = str(soup.find(attrs={'class': 'panel'}))
    dic = {
        'actor': getActor(a),
@@ -99,6 +97,7 @@ def main(number):
        'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group()),
        'actor_photo': '',
        'website': 'https://javdb1.com' + result1,
+        'source': 'javdb.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
    return js
@@ -106,19 +105,18 @@ def main(number):
    a = get_html('https://javdb.com/search?q=' + number + '&f=all')
    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
    result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
-    if result1 == '':
+    if result1 == '' or result1 == 'null':
        a = get_html('https://javdb.com/search?q=' + number.replace('-', '_') + '&f=all')
        html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
        result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
    b = get_html('https://javdb.com' + result1)
    soup = BeautifulSoup(b, 'lxml')
    a = str(soup.find(attrs={'class': 'panel'}))
    dic = {
        'actor': getActor(a),
-        'title': getTitle(b).replace("\\n", '').replace(' ', '').replace(getActor(a), '').replace(getNum(a),
-            '').replace(
+        'title': getTitle(b).replace("\\n", '').replace(' ', '').replace(getActor(a), '').replace(
+            getNum(a),
+            '').replace(
            '无码', '').replace('有码', '').lstrip(' '),
        'studio': getStudio(a),
        'outline': getOutline(a),
@@ -132,9 +130,10 @@ def main(number):
        'label': getLabel(a),
        'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group()),
        'actor_photo': '',
-        'website':'https://javdb.com' + result1,
+        'website': 'https://javdb.com' + result1,
+        'source': 'javdb.py',
    }
-    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
+    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4,separators=(',', ':'), )  # .encode('UTF-8')
    return js
#print(main('061519-861'))

View File

@@ -1,7 +1,6 @@
import re
from lxml import etree
import json
-import requests
from bs4 import BeautifulSoup
from ADC_function import *
@@ -97,6 +96,7 @@ def main(number2):
        'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group()),
        'actor_photo': '',
        'website':'https://www.mgstage.com/product/product_detail/'+str(number)+'/',
+        'source': 'siro.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'),)  # .encode('UTF-8')
    return js

View File

@@ -1,5 +1,5 @@
{
-    "version": "0.11.7",
-    "version_show":"Beta 11.7",
+    "version": "1.1",
+    "version_show":"Beta 1.1",
    "download": "https://github.com/wenead99/AV_Data_Capture/releases"
}
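For reference, a sketch of how this file is consumed by `UpdateCheck()` in AV_Data_capture.py. The real code fetches it through the project's `get_html()` helper; plain `urllib` is used here only to keep the example self-contained:

```python
import json
import urllib.request

version = '1.1'  # local version constant in AV_Data_capture.py
url = 'https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/update_check.json'

# Compare the local version with the published one and point at the download.
info = json.loads(urllib.request.urlopen(url).read().decode('utf-8'))
if version != info['version']:
    print('[*] * New update ' + info['version'] + ' *')
    print('[*] ' + info['download'])
```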