203 Commits
0.10.1 ... 1.3

Author SHA1 Message Date
Yoshiko
f99def64bb Update README.md 2019-10-06 01:24:11 +08:00
Yoshiko
94c4838b42 Update update_check.json 2019-10-06 01:15:05 +08:00
Yoshiko
73c0126fb8 Update 1.3 2019-10-06 01:14:12 +08:00
Yoshiko
ae99c652f5 Update README.md 2019-09-23 09:27:29 +08:00
Yoshiko
2b9ce63601 Version 1.2 Update 2019-08-29 22:21:59 +08:00
Yoshiko
6928df8c3f Version 1.2 Update 2019-08-29 22:20:54 +08:00
Yoshiko
8ccdf7dc5a Merge pull request #36 from moyy996/master
AV_Data_Capture-1.1 (multi-part support)
2019-08-19 22:04:22 +08:00
mo_yy
b438312c97 Update core.py 2019-08-19 12:23:45 +08:00
mo_yy
fd05706636 Update core.py
Support multi-part movies
2019-08-19 10:18:32 +08:00
mo_yy
1e407ef962 Update core.py 2019-08-19 00:50:43 +08:00
Yoshiko
9898932f09 Update update_check.json 2019-08-18 22:40:37 +08:00
Yoshiko
c4fc22054b Update 1.1 2019-08-18 22:40:11 +08:00
Yoshiko
449e900837 Update README.md 2019-08-18 14:52:02 +08:00
Yoshiko
e3ebbec947 Update README.md 2019-08-14 21:56:25 +08:00
Yoshiko
65a9521ab1 Update README.md 2019-08-14 21:55:51 +08:00
Yoshiko
b79a600c0d Update README.md 2019-08-14 19:29:18 +08:00
Yoshiko
30d33fe8f7 Update README.md 2019-08-14 11:50:42 +08:00
Yoshiko
b325fc1f01 Update README.md 2019-08-14 11:49:00 +08:00
Yoshiko
954fb02c0c Update README.md 2019-08-14 00:28:39 +08:00
Yoshiko
5ee398d6b5 Update Beta 11.9 2019-08-12 01:23:57 +08:00
Yoshiko
b754c11814 Update update_check.json 2019-08-12 01:21:46 +08:00
Yoshiko
5d19ae594d Update Beta 11.9 2019-08-12 01:21:34 +08:00
Yoshiko
bfa8ed3144 Update README.md 2019-08-11 00:41:01 +08:00
Yoshiko
0ec23aaa38 Update README.md 2019-08-11 00:39:31 +08:00
Yoshiko
878ae46d77 Update README.md 2019-08-11 00:31:24 +08:00
Yoshiko
766e6bbd88 Update update_check.json 2019-08-11 00:29:15 +08:00
Yoshiko
0107c7d624 Update README.md 2019-08-11 00:28:25 +08:00
Yoshiko
d0cf2d2193 Update README.md 2019-08-11 00:27:36 +08:00
Yoshiko
d1403af548 Update README.md 2019-08-10 22:28:35 +08:00
Yoshiko
bc20b09f60 Update Beta 11.8 2019-08-10 21:45:36 +08:00
Yoshiko
8e2c0c3686 Version fallback to Beta 11.6 2019-08-09 00:32:57 +08:00
Yoshiko
446e1bf7d0 Version fallback to Beta 11.6 2019-08-09 00:32:04 +08:00
Yoshiko
54437236f0 Update Beta 11.7 2019-08-07 00:19:22 +08:00
Yoshiko
9ed57a8ae9 Update README.md 2019-08-07 00:15:33 +08:00
Yoshiko
c66a53ade1 Update Beta 11.7 2019-08-06 16:46:21 +08:00
Yoshiko
7aec4c4b84 Update update_check.json 2019-08-06 16:37:16 +08:00
Yoshiko
cfb3511360 Update Beta 11.7 2019-08-06 16:36:45 +08:00
Yoshiko
2adcfacf27 Merge pull request #26 from RRRRRm/master
Fix the path error under Linux and specify Python3 as the runtime.
2019-08-05 22:52:57 +08:00
RRRRRm
09dc684ff6 Fix some bugs. 2019-08-05 20:39:41 +08:00
RRRRRm
1bc924a6ac Update README.md 2019-08-05 15:57:46 +08:00
RRRRRm
00db4741bc Calling core.py asynchronously. Allow to specify input and output paths. 2019-08-05 15:48:44 +08:00
RRRRRm
1086447369 Fix the path error under Linux. Specify Python3 as the runtime. 2019-08-05 03:00:35 +08:00
Yoshiko
642c8103c7 Update README.md 2019-07-24 08:51:40 +08:00
Yoshiko
b053ae614c Update README.md 2019-07-23 21:18:22 +08:00
Yoshiko
b7583afc9b Merge pull request #20 from biaji/master
Add encoding info to source
2019-07-21 10:28:03 +08:00
biAji
731b08f843 Add encoding info to source
According to PEP-263, add encoding info to source code
2019-07-18 09:22:28 +08:00
Yoshiko
64f235aaff Update README.md 2019-07-15 12:41:14 +08:00
Yoshiko
f0d5a2a45d Update 11.6 2019-07-14 15:07:04 +08:00
Yoshiko
01521fe390 Update 11.6 2019-07-14 10:06:49 +08:00
Yoshiko
a33b882592 Update update_check.json 2019-07-14 09:59:56 +08:00
Yoshiko
150b81453c Update 11.6 2019-07-14 09:58:46 +08:00
Yoshiko
a6df479b78 Update 11.6 2019-07-14 09:45:53 +08:00
Yoshiko
dd6445b2ba Update 11.6 2019-07-14 09:38:26 +08:00
Yoshiko
41051a915b Update README.md 2019-07-12 18:13:09 +08:00
Yoshiko
32ce390939 Update README.md 2019-07-12 18:08:45 +08:00
Yoshiko
8deec6a6c0 Update README.md 2019-07-12 18:08:20 +08:00
Yoshiko
0fab70ff3d Update README.md 2019-07-12 18:07:23 +08:00
Yoshiko
53bbb99a64 Update README.md 2019-07-12 17:59:46 +08:00
Yoshiko
0e712de805 Update README.md 2019-07-11 10:43:55 +08:00
Yoshiko
6f74254e96 Update README.md 2019-07-11 00:58:16 +08:00
Yoshiko
4220bd708b Update README.md 2019-07-11 00:49:23 +08:00
Yoshiko
3802d88972 Update README.md 2019-07-11 00:46:22 +08:00
Yoshiko
8cddbf1e1b Update README.md 2019-07-11 00:41:40 +08:00
Yoshiko
332326e5f6 Update README.md 2019-07-09 18:52:36 +08:00
Yoshiko
27f64a81d0 Update README.md 2019-07-09 17:57:09 +08:00
Yoshiko
7e3fa5ade8 Update README.md 2019-07-09 17:56:48 +08:00
Yoshiko
cc362a2a26 Beta 11.5 Update 2019-07-09 17:47:43 +08:00
Yoshiko
dde6167b05 Update update_check.json 2019-07-09 17:47:02 +08:00
Yoshiko
fe69f42f92 Update README.md 2019-07-09 17:11:09 +08:00
Yoshiko
6b050cef43 Update README.md 2019-07-09 17:09:32 +08:00
Yoshiko
c721c3c769 Update README.md 2019-07-09 16:51:06 +08:00
Yoshiko
9f8702ca12 Update README.md 2019-07-09 16:50:35 +08:00
Yoshiko
153b3a35b8 Update README.md 2019-07-09 15:58:44 +08:00
Yoshiko
88e543a16f Update README.md 2019-07-09 13:51:52 +08:00
Yoshiko
5906af6d95 Update README.md 2019-07-09 13:43:09 +08:00
Yoshiko
39953f1870 Update README.md 2019-07-09 13:17:41 +08:00
Yoshiko
047618a0df Update README.md 2019-07-09 01:55:43 +08:00
Yoshiko
2da51a51d0 Update README.md 2019-07-09 01:45:35 +08:00
Yoshiko
8c0e0a296d Update README.md 2019-07-09 01:45:05 +08:00
Yoshiko
ce0ac607c2 Update README.md 2019-07-04 14:41:29 +08:00
Yoshiko
f0437cf6af Delete py to exe.bat 2019-07-04 03:01:18 +08:00
Yoshiko
32bfc57eed Update README.md 2019-07-04 02:58:48 +08:00
Yoshiko
909ca96915 Update README.md 2019-07-04 02:57:21 +08:00
Yoshiko
341ab5b2bf Update README.md 2019-07-04 02:55:09 +08:00
Yoshiko
d899a19419 Update README.md 2019-07-04 02:54:24 +08:00
Yoshiko
61b0bc40de Update README.md 2019-07-04 02:42:31 +08:00
Yoshiko
6fde3f98dd Delete proxy.ini 2019-07-04 02:26:42 +08:00
Yoshiko
838eb9c8db Update config.ini 2019-07-04 02:26:23 +08:00
Yoshiko
687bbfce10 Update update_check.json 2019-07-04 02:26:00 +08:00
Yoshiko
4b35113932 Beta 11.4 Update 2019-07-04 02:25:40 +08:00
Yoshiko
d672d4d0d7 Update README.md 2019-07-04 02:23:57 +08:00
Yoshiko
1d3845bb91 Update README.md 2019-07-04 02:22:06 +08:00
wenead99
e5effca854 Update README.md 2019-06-30 18:25:54 +08:00
wenead99
bae82898da Update README.md 2019-06-30 02:04:22 +08:00
wenead99
2e8e7151e3 Update README.md 2019-06-30 02:01:17 +08:00
wenead99
8db74bc34d Update README.md 2019-06-30 01:00:50 +08:00
wenead99
e18392d7d3 Update README.md 2019-06-30 00:58:08 +08:00
wenead99
e4e32c06df Update README.md 2019-06-30 00:54:56 +08:00
wenead99
09802c5632 Update README.md 2019-06-30 00:52:43 +08:00
wenead99
584db78fd0 Update README.md 2019-06-30 00:44:46 +08:00
wenead99
56a41604cb Update AV_Data_Capture.py 2019-06-29 19:03:27 +08:00
wenead99
8228084a1d Update README.md 2019-06-29 18:58:39 +08:00
wenead99
f16def5f3a Update update_check.json 2019-06-29 18:49:30 +08:00
wenead99
c0303a57a1 Beta 11.2 Update 2019-06-29 18:43:45 +08:00
wenead99
07c8a7fb0e Update README.md 2019-06-29 17:02:03 +08:00
wenead99
71691e1fe9 Beta 11.1 Update 2019-06-29 16:19:58 +08:00
wenead99
e2569e4541 Add files via upload 2019-06-29 10:37:29 +08:00
wenead99
51385491de Add files via upload 2019-06-29 10:34:40 +08:00
wenead99
bb049714cf Update update_check.json 2019-06-29 10:30:41 +08:00
wenead99
5dcaa20a6c Update README.md 2019-06-28 23:29:38 +08:00
wenead99
26652bf2ed Update README.md 2019-06-24 15:12:22 +08:00
wenead99
352d2fa28a Update README.md 2019-06-24 15:09:48 +08:00
wenead99
ff5ac0d599 Update README.md 2019-06-24 15:08:32 +08:00
wenead99
f34888d2e7 Update README.md 2019-06-23 14:27:39 +08:00
wenead99
f609e647b5 Update README.md 2019-06-23 14:26:27 +08:00
wenead99
ffc280a01c Update README.md 2019-06-23 14:24:13 +08:00
wenead99
fee0ae95b3 Update README.md 2019-06-23 11:18:26 +08:00
wenead99
cd7e254d2e Update README.md 2019-06-23 11:11:32 +08:00
wenead99
ce2995123d Update README.md 2019-06-23 01:08:27 +08:00
wenead99
46e676b592 Update README.md 2019-06-23 01:08:06 +08:00
wenead99
a435d645e4 Update README.md 2019-06-23 01:00:57 +08:00
wenead99
76eecd1e6f Update README.md 2019-06-23 01:00:33 +08:00
wenead99
3c296db204 Update README.md 2019-06-23 00:57:01 +08:00
wenead99
7d6408fe29 Update README.md 2019-06-23 00:56:44 +08:00
wenead99
337c84fd1c Update README.md 2019-06-23 00:55:02 +08:00
wenead99
ad220c1ca6 Update README.md 2019-06-23 00:54:48 +08:00
wenead99
37df711cdc Update README.md 2019-06-23 00:54:28 +08:00
wenead99
92dd9cb734 Update README.md 2019-06-23 00:51:40 +08:00
wenead99
64445b5105 Update README.md 2019-06-23 00:46:11 +08:00
wenead99
bfdb094ee3 Update README.md 2019-06-23 00:35:35 +08:00
wenead99
b38942a326 Update README.md 2019-06-23 00:34:55 +08:00
wenead99
7d03a1f7f9 Update README.md 2019-06-23 00:34:12 +08:00
wenead99
f9c0df7e06 Update README.md 2019-06-23 00:32:30 +08:00
wenead99
b1783d8c75 Update AV_Data_Capture.py 2019-06-22 19:22:23 +08:00
wenead99
908da6d006 Add files via upload 2019-06-22 19:20:54 +08:00
wenead99
9ec99143d4 Update update_check.json 2019-06-22 16:16:45 +08:00
wenead99
575a710ef8 Beta 10.6 Update 2019-06-22 16:16:18 +08:00
wenead99
7c16307643 Update README.md 2019-06-22 16:11:07 +08:00
wenead99
e816529260 Update README.md 2019-06-22 16:10:40 +08:00
wenead99
8282e59a39 Update README.md 2019-06-22 16:08:20 +08:00
wenead99
a96bdb8d13 Update README.md 2019-06-22 16:05:29 +08:00
wenead99
f7f1c3e871 Update README.md 2019-06-22 16:05:01 +08:00
wenead99
632250083f Update README.md 2019-06-22 16:04:18 +08:00
wenead99
0ebfe43133 Update README.md 2019-06-22 16:03:03 +08:00
wenead99
bb367fe79e Update README.md 2019-06-22 15:56:56 +08:00
wenead99
3a4d405c8e Update README.md 2019-06-22 15:53:30 +08:00
wenead99
8f8adcddbb Update README.md 2019-06-22 15:52:06 +08:00
wenead99
394c831b05 Update README.md 2019-06-22 15:47:53 +08:00
wenead99
bb8b3a3bc3 Update update_check.json 2019-06-22 13:19:10 +08:00
wenead99
6c5c932b98 Fix a bug where the INI file made the directory name invalid 2019-06-22 13:16:37 +08:00
wenead99
9a151a5d4c Update README.md 2019-06-22 01:44:28 +08:00
wenead99
f24595687b Beta 10.5 Update 2019-06-22 01:29:42 +08:00
wenead99
aa130d2d25 Update README.md 2019-06-22 01:18:44 +08:00
wenead99
bccc49508e Update README.md 2019-06-22 01:12:33 +08:00
wenead99
ad6db7ca97 Update README.md 2019-06-22 01:05:15 +08:00
wenead99
b95d35d6fa Update README.md 2019-06-22 01:04:38 +08:00
wenead99
3bf0cf5fbc Update README.md 2019-06-22 00:58:28 +08:00
wenead99
dbdc0c818d Update README.md 2019-06-22 00:57:45 +08:00
wenead99
e156c34e23 Update README.md 2019-06-22 00:55:46 +08:00
wenead99
ee782e3794 Update README.md 2019-06-22 00:55:01 +08:00
wenead99
90aa77a23a Update AV_Data_Capture.py 2019-06-22 00:46:43 +08:00
wenead99
d4251c8b44 Beta 10.5 Update 2019-06-22 00:46:06 +08:00
wenead99
6f684e67e2 Beta 0.15 Update 2019-06-22 00:34:36 +08:00
wenead99
18cf202b5b Update README.md 2019-06-21 23:59:15 +08:00
wenead99
54b2b71472 Update README.md 2019-06-21 23:58:12 +08:00
wenead99
44ba47bafc Update README.md 2019-06-21 23:55:23 +08:00
wenead99
7eb72634d8 Update README.md 2019-06-21 20:07:44 +08:00
wenead99
5787d3470a Update README.md 2019-06-21 20:05:53 +08:00
wenead99
1fce045ac2 Update README.md 2019-06-21 20:05:09 +08:00
wenead99
794aa74782 Update README.md 2019-06-21 20:03:07 +08:00
wenead99
b2e49a99a7 Update README.md 2019-06-21 20:01:58 +08:00
wenead99
d208d53375 Update README.md 2019-06-21 20:00:15 +08:00
wenead99
7158378eca Update README.md 2019-06-21 19:59:55 +08:00
wenead99
0961d8cbe4 Update README.md 2019-06-21 19:59:41 +08:00
wenead99
6ef5d11742 Update README.md 2019-06-21 19:57:03 +08:00
wenead99
45e1d8370c Beta 10.4 Update 2019-06-21 18:27:21 +08:00
wenead99
420f995977 Update README.md 2019-06-21 18:26:25 +08:00
wenead99
dbe1f91bd9 Update README.md 2019-06-21 18:23:59 +08:00
wenead99
770c5fcb1f Update update_check.json 2019-06-21 17:54:41 +08:00
wenead99
665d1ffe43 Beta 10.4 2019-06-21 15:40:02 +08:00
wenead99
14ed221152 Update README.md 2019-06-21 10:53:34 +08:00
wenead99
c41b9c1e32 Update README.md 2019-06-21 10:16:14 +08:00
wenead99
17d4d68cbe Update README.md 2019-06-21 10:00:25 +08:00
wenead99
b5a23fe430 Beta 10.3 Update 2019.6.20 2019-06-21 00:03:43 +08:00
wenead99
2747be4a21 Update README.md 2019-06-20 20:49:40 +08:00
wenead99
02da503a2f Update update_check.json 2019-06-20 19:13:38 +08:00
wenead99
31c5d5c314 Update update_check.json 2019-06-20 19:10:28 +08:00
wenead99
22e5b9aa44 Update update_check.json 2019-06-20 19:07:42 +08:00
wenead99
400e8c9678 Update update_check.json 2019-06-20 19:03:24 +08:00
wenead99
b06e744c0c Beta 0.10.3 update check 2019-06-19 20:53:10 +08:00
wenead99
ddbfe7765b Beta 10.3 update check 2019-06-19 20:50:44 +08:00
wenead99
c0f47fb712 Update README.md 2019-06-19 18:22:31 +08:00
wenead99
7b0e8bf5f7 Beta 10.2 Update 2019-06-19 18:21:19 +08:00
wenead99
fa8ea58fe6 Beta 10.2 Update 2019-06-19 18:20:30 +08:00
wenead99
8c824e5d29 Beta 10.2 Update 2019-06-19 18:20:02 +08:00
wenead99
764fba74ec Beta 10.2 Update 2019-06-19 18:19:34 +08:00
wenead99
36c436772c Update README.md 2019-06-19 13:43:04 +08:00
wenead99
897a621adc Update README.md 2019-06-19 13:42:19 +08:00
wenead99
1f5802cdb4 Update README.md 2019-06-19 13:41:05 +08:00
wenead99
0a57e2bab6 Update README.md 2019-06-19 11:03:44 +08:00
wenead99
3ddfe94f2b Update README.md 2019-06-19 11:02:31 +08:00
wenead99
c6fd5ac565 Update README.md 2019-06-19 00:05:01 +08:00
wenead99
2a7cdcf12d Update README.md 2019-06-18 23:56:34 +08:00
13 changed files with 1357 additions and 582 deletions

100
ADC_function.py Normal file → Executable file

@@ -1,35 +1,97 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from configparser import ConfigParser
import os
import re
import time
import sys

config_file = 'config.ini'
config = ConfigParser()

if os.path.exists(config_file):
    try:
        config.read(config_file, encoding='UTF-8')
    except:
        print('[-]Config.ini read failed! Please use the official file!')
else:
    print('[+]config.ini: not found, creating...')
    with open("config.ini", "wt", encoding='UTF-8') as code:
        print("[proxy]", file=code)
        print("proxy=127.0.0.1:1080", file=code)
        print("timeout=10", file=code)
        print("retry=3", file=code)
        print("", file=code)
        print("[Name_Rule]", file=code)
        print("location_rule='JAV_output/'+actor+'/'+number", file=code)
        print("naming_rule=number+'-'+title", file=code)
        print("", file=code)
        print("[update]", file=code)
        print("update_check=1", file=code)
        print("", file=code)
        print("[media]", file=code)
        print("media_warehouse=emby", file=code)
        print("#emby or plex", file=code)
        print("#plex only test!", file=code)
        print("", file=code)
        print("[directory_capture]", file=code)
        print("switch=0", file=code)
        print("directory=", file=code)
        print("", file=code)
        print("everyone switch:1=on, 0=off", file=code)
    time.sleep(2)
    print('[+]config.ini: created!')
    try:
        config.read(config_file, encoding='UTF-8')
    except:
        print('[-]Config.ini read failed! Please use the official file!')


def ReadMediaWarehouse():
    return config['media']['media_warehouse']


def UpdateCheckSwitch():
    check = str(config['update']['update_check'])
    if check == '1':
        return '1'
    elif check == '0':
        return '0'
    elif check == '':
        return '0'


def get_html(url, cookies=None):  # core web-request helper
    try:
        proxy = config['proxy']['proxy']
        timeout = int(config['proxy']['timeout'])
        retry_count = int(config['proxy']['retry'])
    except:
        print('[-]Proxy config error! Please check the config.')
    i = 0
    while i < retry_count:
        try:
            if not str(config['proxy']['proxy']) == '':
                proxies = {"http": "http://" + proxy, "https": "https://" + proxy}
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36'}
                getweb = requests.get(str(url), headers=headers, timeout=timeout, proxies=proxies, cookies=cookies)
                getweb.encoding = 'utf-8'
                return getweb.text
            else:
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
                getweb = requests.get(str(url), headers=headers, timeout=timeout, cookies=cookies)
                getweb.encoding = 'utf-8'
                return getweb.text
        except requests.exceptions.RequestException:  # covers ConnectionError, ProxyError and ConnectTimeout
            i += 1
            print('[-]Connect retry ' + str(i) + '/' + str(retry_count))
    print('[-]Connect Failed! Please check your Proxy or Network!')
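For reference, a minimal smoke test of the retry behavior above (a sketch, not part of the repository; it assumes a valid config.ini sits next to the script, and the URL is just a placeholder):

```python
# Hypothetical usage of get_html(); any reachable URL works here.
from ADC_function import get_html

html = get_html('https://example.com')  # retried up to [proxy] retry times on network errors
if html is None:
    print('all retries exhausted, check the [proxy] section of config.ini')
else:
    print(html[:80])  # first 80 characters of the fetched page
```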

156
AV_Data_Capture.py Normal file → Executable file

@@ -1,61 +1,153 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import os
import time
import re
import sys
from ADC_function import *
import json
import shutil
from configparser import ConfigParser

os.chdir(os.getcwd())

# ============global var===========
version = '1.3'
config = ConfigParser()
config.read(config_file, encoding='UTF-8')
Platform = sys.platform
# ==========global var end=========


def UpdateCheck():
    if UpdateCheckSwitch() == '1':
        html2 = get_html('https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/update_check.json')
        html = json.loads(str(html2))
        if not version == html['version']:
            print('[*] * New update ' + html['version'] + ' *')
            print('[*] * Download *')
            print('[*] ' + html['download'])
            print('[*]=====================================')
    else:
        print('[+]Update Check disabled!')


def movie_lists():
    global exclude_directory_1
    global exclude_directory_2
    directory = config['directory_capture']['directory']
    total = []
    file_type = ['mp4', 'avi', 'rmvb', 'wmv', 'mov', 'mkv', 'flv', 'ts']
    exclude_directory_1 = config['common']['failed_output_folder']
    exclude_directory_2 = config['common']['success_output_folder']
    if directory == '*':
        remove_total = []
        for o in file_type:
            remove_total += glob.glob(r"./" + exclude_directory_1 + "/*." + o)
            remove_total += glob.glob(r"./" + exclude_directory_2 + "/*." + o)
        for i in os.listdir(os.getcwd()):
            for a in file_type:
                total += glob.glob(r"./" + i + "/*." + a)
        for b in remove_total:
            total.remove(b)
        return total
    for a in file_type:
        total += glob.glob(r"./" + directory + "/*." + a)
    return total


def CreatFailedFolder():
    if not os.path.exists('failed/'):  # create the 'failed' folder
        try:
            os.makedirs('failed/')
        except:
            print("[-]Failed! Cannot create folder 'failed'\n[-](Please run as Administrator)")
            os._exit(0)


def lists_from_test(custom_number):  # single-movie list for testing
    a = []
    a.append(custom_number)
    return a


def CEF(path):
    try:
        files = os.listdir(path)  # list the children of path
        for file in files:
            os.removedirs(path + '/' + file)  # delete the folder if it is empty
            print('[+]Deleting empty folder', path + '/' + file)
    except:
        a = ''


def rreplace(self, old, new, *max):
    # replace from the right: source string, substring to replace, replacement, optional maximum count
    count = len(self)
    if max and str(max[0]).isdigit():
        count = max[0]
    return new.join(self.rsplit(old, count))


def getNumber(filepath):
    filepath = filepath.replace('.\\', '')
    try:  # the common case: numbers that contain a dash -
        filepath = filepath.replace("_", "-")
        filepath.strip('22-sht.me').strip('-HD').strip('-hd')
        filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath))  # strip a leading date from the file name
        try:
            file_number = re.search('\w+-\d+', filename).group()
        except:  # numbers like mkbd-s120
            file_number = re.search('\w+-\w+\d+', filename).group()
        return file_number
    except:  # numbers without a dash
        try:
            filename = str(re.sub("ts6\d", "", filepath)).strip('Tokyo-hot').strip('tokyo-hot')
            filename = str(re.sub(".*?\.com-\d+", "", filename)).replace('_', '')
            file_number = str(re.search('\w+\d{4}', filename).group(0))
            return file_number
        except:  # last resort: insert the dash ourselves
            filename = str(re.sub("ts6\d", "", filepath))  # strip ts64/ts65
            filename = str(re.sub(".*?\.com-\d+", "", filename))
            file_number = str(re.match('\w+', filename).group())
            file_number = str(file_number.replace(re.match("^[A-Za-z]+", file_number).group(), re.match("^[A-Za-z]+", file_number).group() + '-'))
            return file_number


def RunCore():
    if Platform == 'win32':
        if os.path.exists('core.py'):
            os.system('python core.py' + ' "' + i + '" --number "' + getNumber(i) + '"')  # run from source
        elif os.path.exists('core.exe'):
            os.system('core.exe' + ' "' + i + '" --number "' + getNumber(i) + '"')  # run the EXE build
    else:
        if os.path.exists('core.py'):
            os.system('python3 core.py' + ' "' + i + '" --number "' + getNumber(i) + '"')  # run from source
        elif os.path.exists('core.exe'):
            os.system('core.exe' + ' "' + i + '" --number "' + getNumber(i) + '"')  # run the EXE build


if __name__ == '__main__':
    print('[*]===========AV Data Capture===========')
    print('[*]           Version ' + version)
    print('[*]=====================================')
    CreatFailedFolder()
    UpdateCheck()
    os.chdir(os.getcwd())
    count = 0
    count_all = str(len(movie_lists()))
    print('[+]Find', count_all, 'movies')
    for i in movie_lists():  # hand every movie in the list to core
        count = count + 1
        percentage = str(count / int(count_all) * 100)[:4] + '%'
        print('[!] - ' + percentage + ' [' + str(count) + '/' + count_all + '] -')
        try:
            print("[!]Making Data for [" + i + "], the number is [" + getNumber(i) + "]")
            RunCore()
            print("[*]=====================================")
        except:  # number extraction failed
            print('[-]' + i + ' Cannot catch the number :')
            print('[-]Move ' + i + ' to failed folder')
            shutil.move(i, str(os.getcwd()) + '/' + 'failed/')
            continue
    CEF(exclude_directory_1)
    CEF(exclude_directory_2)
    print("[+]All finished!!!")
    input("[+][+]Press enter key to exit; you can check the error message before you exit.")

255
README.md

@@ -1,15 +1,70 @@
# AV Data Capture
<a title="Hits" target="_blank" href="https://github.com/yoshiko2/AV_Data_Capture"><img src="https://hits.b3log.org/yoshiko2/AV_Data_Capture.svg"></a>
![](https://img.shields.io/badge/build-passing-brightgreen.svg?style=flat-square)
![](https://img.shields.io/github/downloads/yoshiko2/av_data_capture/total.svg?style=flat-square)<br>
![](https://img.shields.io/github/license/yoshiko2/av_data_capture.svg?style=flat-square)
![](https://img.shields.io/github/release/yoshiko2/av_data_capture.svg?style=flat-square)
![](https://img.shields.io/badge/Python-3.7-yellow.svg?style=flat-square&logo=python)<br>
**Japanese movie metadata scraper.** Used together with local media managers such as EMBY and KODI, it classifies local films and fetches their metadata, so the metadata can drive the sorting of your local collection.

# Table of Contents
* [Disclaimer](#disclaimer)
* [Notes](#notes)
* [FAQ](#faq)
* [Screenshots](#screenshots)
* [How to Use](#how-to-use)
    * [Download](#download)
    * [Quick Tutorial](#quick-tutorial)
    * [Module installation](#1-install-the-modules-by-entering-the-following-commands-one-by-one-in-cmdterminal)
    * [Configuration](#2-configure-configini)
    * [(Optional) Custom folders and renaming rules](#3-optional-custom-folders-and-movie-renaming-rules)
    * [Running the program](#5-run-av_data_capturepyexe)
    * [Where to put the program](#4-copy-the-program-into-the-same-folder-as-your-movies)
    * [Troubleshooting (important)](#51-troubleshooting-important)
    * [Importing into a media library](#7-import-the-jav_output-folder-into-embykodi-and-wait-for-the-metadata-to-refresh)
    * [About Synology NAS](#8-about-synology-nas)
    * [Closing words](#9-closing-words)

# Disclaimer
* This software is intended for **technical and academic exchange** only; the project exists to learn Python 3.<br>
* Any illegal use of this software is forbidden.<br>
* The user bears all legal consequences arising from use of this software.<br>
* It may not be used for commercial purposes or other personal gain.<br>
* Observe your local laws and regulations before using it.

# Notes
**Recommended usage: for movies whose metadata cannot be fetched, fall back to Everaver.**<br>
Multi-part movies are not supported yet.<br>

# FAQ
### Q: Can this software download movies?
**A**: It provides no movie download links of any kind; it only sorts films that are already on your disk.
### Q: What is metadata?
**A**: A film's metadata includes its cover, director, actors, synopsis, genre, and so on.
### Q: Is the software paid?
**A**: It is free, forever. Anyone **selling this software for profit** without the **author's** blessing deserves everything coming to them.
### Q: What if the software misbehaves?
**A**: Read [Troubleshooting (important)](#51-troubleshooting-important) carefully.

# Screenshots
**The images come from the internet.** Owing to the relevant laws and regulations, please use your imagination for the actual effect.
![](https://i.loli.net/2019/07/04/5d1cf9bb1b08b86592.jpg)
![](https://i.loli.net/2019/07/04/5d1cf9bb2696937880.jpg)<br>

# How to Use
### Download
* The release build runs without a **Python environment**, in which case you can skip [Module installation](#1-install-the-modules-by-entering-the-following-commands-one-by-one-in-cmdterminal).<br>Release download (**Windows only**):<br>[![](https://img.shields.io/badge/%E4%B8%8B%E8%BD%BD-windows-blue.svg?style=for-the-badge&logo=windows)](https://github.com/yoshiko2/AV_Data_Capture/releases)<br>
* On Linux and macOS, download the source package and run it directly.
* Python environment for Windows: [download here](https://www.python.org/downloads/windows/) and pick the executable installer.
* Python environment for macOS: [download here](https://www.python.org/downloads/mac-osx/)
* Python environment for Linux: Linux users know the drill; no link needed.

### Quick Tutorial<br>
**1. Put the program in the same folder as your movies.<br>2. Set the proxy in the ini file (if your router already proxies automatically, clear everything after proxy=).<br>3. Run the program and wait for it to finish.<br>4. Import JAV_output into KODI or EMBY.<br>The detailed steps follow.**<br>

## 1. Install the modules by entering the following commands one by one in CMD/terminal
```python

@@ -33,16 +88,102 @@
pip install pillow
```
###
## 2. Configure config.ini
config.ini:
>[common]<br>
>main_mode=1<br>
>failed_output_folder=failed<br>
>success_output_folder=JAV_output<br>
>
>[proxy]<br>
>proxy=127.0.0.1:1080<br>
>timeout=10<br>
>retry=3<br>
>
>[Name_Rule]<br>
>location_rule=actor+'/'+number<br>
>naming_rule=number+'-'+title<br>
>
>[update]<br>
>update_check=1<br>
>
>[media]<br>
>media_warehouse=emby<br>
>#emby or plex<br>
>
>[directory_capture]<br>
>directory=<br>

### Global settings
---
#### Program mode
>[common]<br>
>main_mode=1<br>

1 is normal mode; 2 is organize-only mode (movies are only renamed to their number and sorted into per-actress folders).

>failed_output_folder=failed<br>
>success_output_folder=JAV_output<br>

Sets the success output folder and the failed output folder.

---
### Network settings
#### * Proxy settings for "certain regions"
Open ```config.ini``` and set your local proxy address and port on the ```proxy``` line under ```[proxy]``` (local Shadowxxxx/X and V2XXX proxy ports are supported):<br>
Example: ```proxy=127.0.0.1:1080```<br>For scraping the amateur ("siro") series, a Japanese proxy is recommended.<br>
**If your router proxies automatically, clear everything after proxy=.**<br>
**The same goes for comrades whose local proxy software runs in global mode.**<br>
**If you hit a timeout error, delete the address and port after proxy= and switch your VPN to global mode, or restart the computer, the VPN, or the network adapter.**<br>
#### Connection timeout
>[proxy]<br>
>timeout=10<br>

10 is the time before a retry, in seconds.

---
#### Connection retries
>[proxy]<br>
>retry=3<br>

3 is the number of retries.

---
#### Update check switch
>[update]<br>
>update_check=1<br>

0 disables the check, 1 enables it; disabling is not recommended.

---
##### Media library selection
>[media]<br>
>media_warehouse=emby<br>
>#emby or plex<br>

Choose emby or plex.<br>
For PLEX, install the ```XBMCnfoMoviesImporter``` plugin.

---
#### Debug mode
>[debug_mode]<br>switch=1<br>

To enable debug mode, add the lines above to ```config.ini``` by hand; when enabled, the movie metadata is printed during scraping.

---
#### Capture directory
>[directory_capture]<br>
>directory=<br>

If directory is left empty, movies in the program's own folder are scanned; set it to ``` * ``` to scan every subdirectory under the program's folder.

### 3. (Optional) Custom folders and movie renaming rules
>[Name_Rule]<br>
>location_rule=actor+'/'+number<br>
>naming_rule=number+'-'+title<br>

A default configuration is provided.

---
#### Naming parameters
>title = movie title<br>
>actor = actor<br>
>studio = studio<br>

@@ -54,38 +195,74 @@
>tag = genre<br>
>outline = synopsis<br>
>runtime = running time<br>

The parameters above are referred to as **variables** below.

#### Example:
A custom rule is built from two kinds of element, variables and literal strings, and every pair of elements must be joined with a plus sign **+**. For example, in ```naming_rule='['+number+']-'+title``` the text inside the quotes ' ' is a literal string and the unquoted names are variables.<br>
Folder structure rule: default ```location_rule=actor+'/'+number```<br>
**Adding title here is not recommended**: titles can be very long, and because of a Windows API limitation, creating the folder during scraping then tends to fail.<br>
Movie naming rule: default ```naming_rule=number+'-'+title```<br>
**This only sets the title shown in local media libraries such as EMBY and KODI; it does not affect the movie file name inside the folder**, which stays number + extension. See the sketch below for how the rules are evaluated.
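To make the rule syntax concrete, here is roughly what core.py does with these strings (a simplified sketch with made-up metadata values; the real code evaluates the rule after scraping):

```python
# Simplified sketch of how a Name_Rule string is evaluated with eval().
number = 'ABP-454'             # hypothetical scraped metadata
title = 'Some Example Title'
actor = 'Example Actress'

naming_rule = eval("number+'-'+title")    # -> 'ABP-454-Some Example Title'
location_rule = eval("actor+'/'+number")  # -> 'Example Actress/ABP-454'
print(naming_rule, '|', location_rule)
```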
## 4. Copy the program into the same folder as your movies
This applies when ```directory=``` in ```config.ini``` is left empty.

## 5. Run ```AV_Data_capture.py/.exe```
When a file name contains 中文, 字幕, -c. or -C., a **Chinese subtitles** (中文字幕) tag is added while the metadata is processed.

## 5.1 Troubleshooting (important)
### Make sure the program is complete and that your ini file matches the one shipped with the download.
---
### The program closes immediately after opening
Open a cmd prompt, drag ```AV_Data_capture.py/.exe``` into the window, press Enter to run it, read the error message, and **fix it using the entries below**.
---
### Errors mentioning ```update_check``` and ```JSON```
See [Network settings](#network-settings).
---
### ```FileNotFoundError: [WinError 3] The system cannot find the path specified.: 'JAV_output'```
Create a JAV_output folder next to the program; you probably did not put the program in the same folder as the movies.
---
### Connection-refused errors
Set up the [proxy](#network-settings) properly.<br>
---
### NoneType and xpath errors
Same as above.<br>
---
### Number extraction fails or misbehaves
**Movies whose metadata can currently be scraped: anything with metadata on JAVBUS; the amateur series 300Maan, 259luxu, siro, etc.; the FC2 series.**<br>
>The image below comes from Pockies' blog, used with the original author's permission.<br>

![](https://raw.githubusercontent.com/Pockies/pic/master/741f9461gy1g1cxc31t41j20i804zdgo.jpg)

The number-extraction logic has been much improved and can handle all the file names above. If extraction still fails or misbehaves, rename the file following these rules.<br>
**And please stop feeding the program piles of obscure rips; give it a chance to do its job.**
```
COSQ-004.mp4
```
For **non-standard numbers** you need to rename the file to exactly the number the scraping site uses (except for the file extension), then drag the file onto core.exe/.py.<br>
**Non-standard numbers**: works such as ```XXX-XXX-1``` or ```1301XX-MINA_YUKA``` that do exist in databases like javbus.<br>**Important**: this excludes **file names** like ```XXXX-XXX-C```, where the trailing -C means the movie has Chinese subtitles.<br>
Best practice: put an underscore or a dash ("_", "-") in the middle of the file name and include nothing but the number, so the program can fetch metadata reliably.
To rename many movies at once, a batch renamer such as [ReNamer](http://www.den4b.com/products/renamer) works well.<br>
---
### Errors from PIL/image.py
No fix for now; it is probably a network problem or a packaging problem with the pillow module. Running from source (with the step 1 modules installed) should work.

## 6. Movies whose metadata was fetched successfully are moved into the JAV_output folder, sorted by actor; failed movies are moved into the failed folder.
## 7. Import the JAV_output folder into EMBY/KODI and wait for the metadata to refresh
## 8. About Synology NAS
Enable SMB and mount the share as a network drive on Windows, then use the program normally; the same applies to other NAS boxes.
## 9. Closing words
Well? Watching your Japanese movie collection get managed this neatly, doesn't the sense of achievement just overflow?<br>
**Official Telegram group: [click to join](https://t.me/AV_Data_Capture_Official)**<br>

112
avsox.py Normal file

@@ -0,0 +1,112 @@
import re
from lxml import etree
import json
from bs4 import BeautifulSoup
from ADC_function import *


def getActorPhoto(htmlcode):  # //*[@id="star_qdt"]/li/a/img
    soup = BeautifulSoup(htmlcode, 'lxml')
    a = soup.find_all(attrs={'class': 'avatar-box'})
    d = {}
    for i in a:
        l = i.img['src']
        t = i.span.get_text()
        p2 = {t: l}
        d.update(p2)
    return d


def getTitle(a):
    try:
        html = etree.fromstring(a, etree.HTMLParser())
        result = str(html.xpath('/html/body/div[2]/h3/text()')).strip(" ['']")  # [0]
        return result.replace('/', '')
    except:
        return ''


def getActor(a):  # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
    soup = BeautifulSoup(a, 'lxml')
    a = soup.find_all(attrs={'class': 'avatar-box'})
    d = []
    for i in a:
        d.append(i.span.get_text())
    return d


def getStudio(a):
    html = etree.fromstring(a, etree.HTMLParser())  # //table/tr[1]/td[1]/text()
    result1 = str(html.xpath('//p[contains(text(),"制作商: ")]/following-sibling::p[1]/a/text()')).strip(" ['']").replace("', '", ' ')
    return result1


def getRuntime(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//span[contains(text(),"长度:")]/../text()')).strip(" ['分钟']")
    return result1


def getLabel(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//p[contains(text(),"系列:")]/following-sibling::p[1]/a/text()')).strip(" ['']")
    return result1


def getNum(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//span[contains(text(),"识别码:")]/../span[2]/text()')).strip(" ['']")
    return result1


def getYear(release):
    try:
        result = str(re.search('\d{4}', release).group())
        return result
    except:
        return release


def getRelease(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//span[contains(text(),"发行时间:")]/../text()')).strip(" ['']")
    return result1


def getCover(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('/html/body/div[2]/div[1]/div[1]/a/img/@src')).strip(" ['']")
    return result


def getCover_small(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('//*[@id="waterfall"]/div/a/div[1]/img/@src')).strip(" ['']")
    return result


def getTag(a):  # get the genre tags
    soup = BeautifulSoup(a, 'lxml')
    a = soup.find_all(attrs={'class': 'genre'})
    d = []
    for i in a:
        d.append(i.get_text())
    return d


def main(number):
    a = get_html('https://avsox.asia/cn/search/' + number)
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
    if result1 == '' or result1 == 'null' or result1 == 'None':
        a = get_html('https://avsox.asia/cn/search/' + number.replace('-', '_'))  # retry with an underscore
        print(a)
        html = etree.fromstring(a, etree.HTMLParser())
        result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
    if result1 == '' or result1 == 'null' or result1 == 'None':
        a = get_html('https://avsox.asia/cn/search/' + number.replace('_', ''))  # retry with the separator stripped
        print(a)
        html = etree.fromstring(a, etree.HTMLParser())
        result1 = str(html.xpath('//*[@id="waterfall"]/div/a/@href')).strip(" ['']")
    web = get_html(result1)
    soup = BeautifulSoup(web, 'lxml')
    info = str(soup.find(attrs={'class': 'row movie'}))
    dic = {
        'actor': getActor(web),
        'title': getTitle(web).strip(getNum(web)),
        'studio': getStudio(info),
        'outline': '',
        'runtime': getRuntime(info),
        'director': '',
        'release': getRelease(info),
        'number': getNum(info),
        'cover': getCover(web),
        'cover_small': getCover_small(a),
        'imagecut': 3,
        'tag': getTag(web),
        'label': getLabel(info),
        'year': getYear(getRelease(info)),  # str(re.search('\d{4}',getRelease(a)).group()),
        'actor_photo': getActorPhoto(web),
        'website': result1,
        'source': 'avsox.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'))
    return js

# print(main('041516_541'))
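A minimal usage sketch (the sample number mirrors the commented-out test above; network access, and usually a working proxy, are assumed):

```python
# Parse the JSON string returned by avsox.main() back into a dict.
import json
import avsox

data = json.loads(avsox.main('041516_541'))
print(data['title'], data['release'], data['source'])  # a few of the scraped fields
```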

23
config.ini Normal file

@@ -0,0 +1,23 @@
[common]
main_mode=1
failed_output_folder=failed
success_output_folder=JAV_output

[proxy]
proxy=127.0.0.1:1080
timeout=10
retry=3

[Name_Rule]
location_rule=actor+'/'+number
naming_rule=number+'-'+title

[update]
update_check=1

[media]
media_warehouse=emby
#emby or plex

[directory_capture]
directory=
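A short sketch of how the scripts consume this file via ConfigParser (the section and key names come straight from the file above):

```python
# Read config.ini the same way ADC_function.py does.
from configparser import ConfigParser

config = ConfigParser()
config.read('config.ini', encoding='UTF-8')
print(config['proxy']['proxy'])            # 127.0.0.1:1080
print(int(config['proxy']['retry']))       # 3, used as the retry budget
print(config['Name_Rule']['naming_rule'])  # number+'-'+title
```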

541
core.py Normal file → Executable file

@@ -1,17 +1,30 @@
# -*- coding: utf-8 -*-
import re import re
import os import os
import os.path import os.path
import shutil import shutil
from PIL import Image from PIL import Image
import time import time
import javbus
import json import json
import fc2fans_club
import siro
from ADC_function import * from ADC_function import *
from configparser import ConfigParser from configparser import ConfigParser
import argparse
#=========website========
import fc2fans_club
import siro
import avsox
import javbus
import javdb
#=========website========
#初始化全局变量 #初始化全局变量
Config = ConfigParser()
Config.read(config_file, encoding='UTF-8')
try:
option = ReadMediaWarehouse()
except:
print('[-]Config media_warehouse read failed!')
title='' title=''
studio='' studio=''
year='' year=''
@@ -25,23 +38,46 @@ number=''
cover='' cover=''
imagecut='' imagecut=''
tag=[] tag=[]
cn_sub=''
multi_part=0
part=''
path=''
houzhui=''
website=''
json_data={}
actor_photo={}
cover_small=''
naming_rule =''#eval(config['Name_Rule']['naming_rule']) naming_rule =''#eval(config['Name_Rule']['naming_rule'])
location_rule=''#eval(config['Name_Rule']['location_rule']) location_rule=''#eval(config['Name_Rule']['location_rule'])
program_mode = Config['common']['main_mode']
failed_folder = Config['common']['failed_output_folder']
success_folder = Config['common']['success_output_folder']
#=====================本地文件处理=========================== #=====================本地文件处理===========================
def moveFailedFolder():
global filepath
print('[-]Move to Failed output folder')
shutil.move(filepath, str(os.getcwd()) + '/' + failed_folder + '/')
os._exit(0)
def argparse_get_file(): def argparse_get_file():
import argparse
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument("--number", help="Enter Number on here", default='')
parser.add_argument("file", help="Write the file path on here") parser.add_argument("file", help="Write the file path on here")
args = parser.parse_args() args = parser.parse_args()
return args.file return (args.file, args.number)
def CreatFailedFolder(): def CreatFailedFolder():
if not os.path.exists('failed/'): # 新建failed文件夹 if not os.path.exists(failed_folder+'/'): # 新建failed文件夹
try: try:
os.makedirs('failed/') os.makedirs(failed_folder+'/')
except: except:
print("[-]failed!can not be make folder 'failed'\n[-](Please run as Administrator)") print("[-]failed!can not be make Failed output folder\n[-](Please run as Administrator)")
os._exit(0) os._exit(0)
def getNumberFromFilename(filepath): def getDataState(json_data): #元数据获取失败检测
if json_data['title'] == '' or json_data['title'] == 'None' or json_data['title'] == 'null':
return 0
else:
return 1
def getDataFromJSON(file_number): #从JSON返回元数据
global title global title
global studio global studio
global year global year
@@ -55,260 +91,409 @@ def getNumberFromFilename(filepath):
global cover global cover
global imagecut global imagecut
global tag global tag
global image_main
global cn_sub
global website
global actor_photo
global cover_small
global json_data
global naming_rule global naming_rule
global location_rule global location_rule
#================================================获取文件番号================================================ # ================================================网站规则添加开始================================================
try: #试图提取番号
# ====番号获取主程序====
try: # 普通提取番号 主要处理包含减号-的番号
filepath.strip('22-sht.me').strip('-HD').strip('-hd')
filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filepath)) # 去除文件名中文件名
file_number = re.search('\w+-\d+', filename).group()
except: # 提取不含减号-的番号
try: # 提取东京热番号格式 n1087
filename1 = str(re.sub("h26\d", "", filepath)).strip('Tokyo-hot').strip('tokyo-hot')
filename0 = str(re.sub(".*?\.com-\d+", "", filename1)).strip('_')
file_number = str(re.search('n\d{4}', filename0).group(0))
except: # 提取无减号番号
filename1 = str(re.sub("h26\d", "", filepath)) # 去除h264/265
filename0 = str(re.sub(".*?\.com-\d+", "", filename1))
file_number2 = str(re.match('\w+', filename0).group())
file_number = str(file_number2.replace(re.match("^[A-Za-z]+", file_number2).group(),re.match("^[A-Za-z]+", file_number2).group() + '-'))
if not re.search('\w-', file_number).group() == 'None':
file_number = re.search('\w+-\w+', filename).group()
#上面是插入减号-到番号中
print("[!]Making Data for [" + filename + "],the number is [" + file_number + "]")
# ====番号获取主程序=结束===
except Exception as e: #番号提取异常
print('[-]'+str(os.path.basename(filepath))+' Cannot catch the number :')
print('[-]' + str(os.path.basename(filepath)) + ' :', e)
print('[-]Move ' + os.path.basename(filepath) + ' to failed folder')
shutil.move(filepath, str(os.getcwd()) + '/' + 'failed/')
os._exit(0)
except IOError as e2:
print('[-]' + str(os.path.basename(filepath)) + ' Cannot catch the number :')
print('[-]' + str(os.path.basename(filepath)) + ' :',e2)
print('[-]Move ' + os.path.basename(filepath) + ' to failed folder')
shutil.move(filepath, str(os.getcwd()) + '/' + 'failed/')
os._exit(0)
try:
if re.match('^\d{5,}', file_number):
json_data = json.loads(avsox.main(file_number))
if getDataState(json_data) == 0: # 如果元数据获取失败,请求番号至其他网站抓取
# ================================================网站规则添加开始================================================ json_data = json.loads(javdb.main(file_number))
#==
try: #添加 需要 正则表达式的规则 elif re.match('\d+\D+', file_number):
#=======================javbus.py=======================
if re.search('^\d{5,}', file_number).group() in filename:
json_data = json.loads(javbus.main_uncensored(file_number))
except: #添加 无需 正则表达式的规则
# ====================fc2fans_club.py===================
if 'fc2' in filename:
json_data = json.loads(fc2fans_club.main(file_number.strip('fc2_').strip('fc2-')))
elif 'FC2' in filename:
json_data = json.loads(fc2fans_club.main(file_number.strip('FC2_').strip('FC2-')))
#========================siro.py========================
elif 'siro' in filename:
json_data = json.loads(siro.main(file_number)) json_data = json.loads(siro.main(file_number))
elif 'SIRO' in filename: if getDataState(json_data) == 0: # 如果元数据获取失败,请求番号至其他网站抓取
json_data = json.loads(javbus.main(file_number))
elif getDataState(json_data) == 0: # 如果元数据获取失败,请求番号至其他网站抓取
json_data = json.loads(javdb.main(file_number))
# ==
elif 'fc2' in file_number or 'FC2' in file_number:
json_data = json.loads(fc2fans_club.main(file_number))
# ==
elif 'HEYZO' in number or 'heyzo' in number or 'Heyzo' in number:
json_data = json.loads(avsox.main(file_number))
# ==
elif 'siro' in file_number or 'SIRO' in file_number or 'Siro' in file_number:
json_data = json.loads(siro.main(file_number)) json_data = json.loads(siro.main(file_number))
elif '259luxu' in filename: # ==
json_data = json.loads(siro.main(file_number))
elif '259LUXU' in filename:
json_data = json.loads(siro.main(file_number))
elif '300MAAN' in filename:
json_data = json.loads(siro.main(file_number))
elif '300maan' in filename:
json_data = json.loads(siro.main(file_number))
elif '326SCP' in filename:
json_data = json.loads(siro.main(file_number))
elif '326scp' in filename:
json_data = json.loads(siro.main(file_number))
elif '326URF' in filename:
json_data = json.loads(siro.main(file_number))
elif '326urf' in filename:
json_data = json.loads(siro.main(file_number))
#=======================javbus.py=======================
else: else:
json_data = json.loads(javbus.main(file_number)) json_data = json.loads(javbus.main(file_number))
if getDataState(json_data) == 0: # 如果元数据获取失败,请求番号至其他网站抓取
json_data = json.loads(avsox.main(file_number))
elif getDataState(json_data) == 0: # 如果元数据获取失败,请求番号至其他网站抓取
json_data = json.loads(javdb.main(file_number))
# ================================================网站规则添加结束================================================
title = str(json_data['title']).replace(' ','')
#================================================网站规则添加结束================================================
title = json_data['title']
studio = json_data['studio'] studio = json_data['studio']
year = json_data['year'] year = json_data['year']
outline = json_data['outline'] outline = json_data['outline']
runtime = json_data['runtime'] runtime = json_data['runtime']
director = json_data['director'] director = json_data['director']
actor_list= str(json_data['actor']).strip("[ ]").replace("'",'').replace(" ",'').split(',') #字符串转列表 actor_list = str(json_data['actor']).strip("[ ]").replace("'", '').split(',') # 字符串转列表
release = json_data['release'] release = json_data['release']
number = json_data['number'] number = json_data['number']
cover = json_data['cover'] cover = json_data['cover']
try:
cover_small = json_data['cover_small']
except:
cover_small=''
imagecut = json_data['imagecut'] imagecut = json_data['imagecut']
tag = str(json_data['tag']).strip("[ ]").replace("'",'').replace(" ",'').split(',') #字符串转列表 tag = str(json_data['tag']).strip("[ ]").replace("'", '').replace(" ", '').split(',') # 字符串转列表 @
actor = str(actor_list).strip("[ ]").replace("'",'').replace(" ",'') actor = str(actor_list).strip("[ ]").replace("'", '').replace(" ", '')
actor_photo = json_data['actor_photo']
website = json_data['website']
source = json_data['source']
if title == '' or number == '':
print('[-]Movie Data not found!')
moveFailedFolder()
if imagecut == '3':
DownloadFileWithFilename()
# ====================处理异常字符====================== #\/:*?"<>|
title = title.replace('\\', '')
title = title.replace('/', '')
title = title.replace(':', '')
title = title.replace('*', '')
title = title.replace('?', '')
title = title.replace('"', '')
title = title.replace('<', '')
title = title.replace('>', '')
title = title.replace('|', '')
# ====================处理异常字符 END================== #\/:*?"<>|
naming_rule = eval(config['Name_Rule']['naming_rule']) naming_rule = eval(config['Name_Rule']['naming_rule'])
location_rule =eval(config['Name_Rule']['location_rule']) location_rule = eval(config['Name_Rule']['location_rule'])
except IOError as e: def smallCoverCheck():
print('[-]'+str(e)) if imagecut == 3:
print('[-]Move ' + filename + ' to failed folder') if option == 'emby':
shutil.move(filepath, str(os.getcwd())+'/'+'failed/') DownloadFileWithFilename(cover_small, '1.jpg', path)
os._exit(0) img = Image.open(path + '/1.jpg')
w = img.width
except Exception as e: h = img.height
print('[-]'+str(e)) img.save(path + '/' + number + '.png')
print('[-]Move ' + filename + ' to failed folder') time.sleep(1)
shutil.move(filepath, str(os.getcwd())+'/'+'failed/') os.remove(path + '/1.jpg')
os._exit(0) if option == 'plex':
path = '' #设置path为全局变量后面移动文件要用 DownloadFileWithFilename(cover_small, '1.jpg', path)
def creatFolder(): img = Image.open(path + '/1.jpg')
w = img.width
h = img.height
img.save(path + '/poster.png')
os.remove(path + '/1.jpg')
def creatFolder(): #创建文件夹
global actor
global path global path
if len(actor) > 240: #新建成功输出文件夹 if len(os.getcwd()+path) > 240: #新建成功输出文件夹
path = location_rule.replace("'actor'","'超多人'",3).replace("actor","'超多人'",3) #path为影片+元数据所在目录 path = success_folder+'/'+location_rule.replace("'actor'","'超多人'",3).replace("actor","'超多人'",3) #path为影片+元数据所在目录
#print(path)
else: else:
path = location_rule path = success_folder+'/'+location_rule
#print(path) #print(path)
if not os.path.exists(path): if not os.path.exists(path):
try:
os.makedirs(path)
except:
path = success_folder+'/'+location_rule.replace('/['+number+']-'+title,"/number")
#print(path)
os.makedirs(path) os.makedirs(path)
#=====================资源下载部分=========================== #=====================资源下载部分===========================
def DownloadFileWithFilename(url,filename,path): #path = examle:photo , video.in the Project Folder! def DownloadFileWithFilename(url,filename,path): #path = examle:photo , video.in the Project Folder!
config = ConfigParser()
config.read('proxy.ini', encoding='UTF-8')
proxy = str(config['proxy']['proxy'])
if not str(config['proxy']['proxy']) == '':
try: try:
proxy = Config['proxy']['proxy']
timeout = int(Config['proxy']['timeout'])
retry_count = int(Config['proxy']['retry'])
except:
print('[-]Proxy config error! Please check the config.')
i = 0
while i < retry_count:
try:
if not proxy == '':
if not os.path.exists(path): if not os.path.exists(path):
os.makedirs(path) os.makedirs(path)
headers = { headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'} 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
r = requests.get(url, headers=headers,proxies={"http": "http://" + str(proxy), "https": "https://" + str(proxy)}) r = requests.get(url, headers=headers, timeout=timeout,proxies={"http": "http://" + str(proxy), "https": "https://" + str(proxy)})
if r == '':
print('[-]Movie Data not found!')
os._exit(0)
with open(str(path) + "/" + filename, "wb") as code: with open(str(path) + "/" + filename, "wb") as code:
code.write(r.content) code.write(r.content)
# print(bytes(r),file=code) return
except IOError as e:
print("[-]Movie not found in All website!")
print("[-]" + filename, e)
# print("[*]=====================================")
return "failed"
except Exception as e1:
print(e1)
print("[-]Download Failed2!")
time.sleep(3)
os._exit(0)
else: else:
try:
if not os.path.exists(path): if not os.path.exists(path):
os.makedirs(path) os.makedirs(path)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'} headers = {
r = requests.get(url, headers=headers) 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
r = requests.get(url, timeout=timeout, headers=headers)
if r == '':
print('[-]Movie Data not found!')
os._exit(0)
with open(str(path) + "/" + filename, "wb") as code: with open(str(path) + "/" + filename, "wb") as code:
code.write(r.content) code.write(r.content)
# print(bytes(r),file=code) return
except IOError as e: except requests.exceptions.RequestException:
print("[-]Movie not found in All website!") i += 1
print("[-]" + filename, e) print('[-]Image Download : Connect retry '+str(i)+'/'+str(retry_count))
# print("[*]=====================================") except requests.exceptions.ConnectionError:
return "failed" i += 1
except Exception as e1: print('[-]Image Download : Connect retry '+str(i)+'/'+str(retry_count))
print(e1) except requests.exceptions.ProxyError:
print("[-]Download Failed2!") i += 1
time.sleep(3) print('[-]Image Download : Connect retry '+str(i)+'/'+str(retry_count))
os._exit(0) except requests.exceptions.ConnectTimeout:
def PrintFiles(path): i += 1
print('[-]Image Download : Connect retry '+str(i)+'/'+str(retry_count))
print('[-]Connect Failed! Please check your Proxy or Network!')
moveFailedFolder()
def imageDownload(filepath): #封面是否下载成功否则移动到failed
if option == 'emby':
if DownloadFileWithFilename(cover, number + '.jpg', path) == 'failed':
moveFailedFolder()
DownloadFileWithFilename(cover, number + '.jpg', path)
if multi_part == 1:
old_name = os.path.join(path, number + '.jpg')
new_name = os.path.join(path, number + part + '.jpg')
os.rename(old_name, new_name)
print('[+]Image Downloaded!', path + '/' + number + part + '.jpg')
else:
print('[+]Image Downloaded!', path + '/' + number + '.jpg')
elif option == 'plex':
if DownloadFileWithFilename(cover, 'fanart.jpg', path) == 'failed':
moveFailedFolder()
DownloadFileWithFilename(cover, 'fanart.jpg', path)
print('[+]Image Downloaded!', path + '/fanart.jpg')
def PrintFiles(filepath):
try: try:
if not os.path.exists(path): if not os.path.exists(path):
os.makedirs(path) os.makedirs(path)
with open(path + "/" + naming_rule + ".nfo", "wt", encoding='UTF-8') as code: if option == 'plex':
with open(path + "/" + number + ".nfo", "wt", encoding='UTF-8') as code:
print("<movie>", file=code) print("<movie>", file=code)
print(" <title>" + title + "</title>", file=code) print(" <title>" + naming_rule + "</title>", file=code)
print(" <set>", file=code) print(" <set>", file=code)
print(" </set>", file=code) print(" </set>", file=code)
print(" <studio>" + studio + "+</studio>", file=code) print(" <studio>" + studio + "+</studio>", file=code)
print(" <year>" + year + "</year>", file=code) print(" <year>" + year + "</year>", file=code)
print(" <outline>"+outline+"</outline>", file=code) print(" <outline>" + outline + "</outline>", file=code)
print(" <plot>"+outline+"</plot>", file=code) print(" <plot>" + outline + "</plot>", file=code)
print(" <runtime>"+str(runtime).replace(" ","")+"</runtime>", file=code) print(" <runtime>" + str(runtime).replace(" ", "") + "</runtime>", file=code)
print(" <director>" + director + "</director>", file=code) print(" <director>" + director + "</director>", file=code)
print(" <poster>" + naming_rule + ".png</poster>", file=code) print(" <poster>poster.png</poster>", file=code)
print(" <thumb>" + naming_rule + ".png</thumb>", file=code) print(" <thumb>thumb.png</thumb>", file=code)
print(" <fanart>"+naming_rule + '.jpg'+"</fanart>", file=code) print(" <fanart>fanart.jpg</fanart>", file=code)
try: try:
for u in actor_list: for key, value in actor_photo.items():
print(" <actor>", file=code) print(" <actor>", file=code)
print(" <name>" + u + "</name>", file=code) print(" <name>" + key + "</name>", file=code)
if not actor_photo == '': # or actor_photo == []:
print(" <thumb>" + value + "</thumb>", file=code)
print(" </actor>", file=code) print(" </actor>", file=code)
except: except:
aaaa='' aaaa = ''
print(" <maker>" + studio + "</maker>", file=code) print(" <maker>" + studio + "</maker>", file=code)
print(" <label>", file=code) print(" <label>", file=code)
print(" </label>", file=code) print(" </label>", file=code)
if cn_sub == '1':
print(" <tag>中文字幕</tag>", file=code)
try:
for i in str(json_data['tag']).strip("[ ]").replace("'", '').replace(" ", '').split(','):
print(" <tag>" + i + "</tag>", file=code)
except:
aaaaa = ''
try:
for i in str(json_data['tag']).strip("[ ]").replace("'", '').replace(" ", '').split(','):
print(" <genre>" + i + "</genre>", file=code)
except:
aaaaaaaa = ''
if cn_sub == '1':
print(" <genre>中文字幕</genre>", file=code)
print(" <num>" + number + "</num>", file=code)
print(" <release>" + release + "</release>", file=code)
print(" <cover>" + cover + "</cover>", file=code)
print(" <website>" + website + "</website>", file=code)
print("</movie>", file=code)
print("[+]Writeed! " + path + "/" + number + ".nfo")
    elif option == 'emby':
        with open(path + "/" + number + ".nfo", "wt", encoding='UTF-8') as code:
            print("<movie>", file=code)
            print("  <title>" + naming_rule + "</title>", file=code)
            print("  <set>", file=code)
            print("  </set>", file=code)
            print("  <studio>" + studio + "</studio>", file=code)
            print("  <year>" + year + "</year>", file=code)
            print("  <outline>" + outline + "</outline>", file=code)
            print("  <plot>" + outline + "</plot>", file=code)
            print("  <runtime>" + str(runtime).replace(" ", "") + "</runtime>", file=code)
            print("  <director>" + director + "</director>", file=code)
            print("  <poster>" + number + ".png</poster>", file=code)
            print("  <thumb>" + number + ".png</thumb>", file=code)
            print("  <fanart>" + number + '.jpg' + "</fanart>", file=code)
            try:
                for key, value in actor_photo.items():
                    print("  <actor>", file=code)
                    print("    <name>" + key + "</name>", file=code)
                    if not actor_photo == '':  # or actor_photo == []
                        print("    <thumb>" + value + "</thumb>", file=code)
                    print("  </actor>", file=code)
            except Exception:
                pass
            print("  <maker>" + studio + "</maker>", file=code)
            print("  <label>", file=code)
            print("  </label>", file=code)
            if cn_sub == '1':
                print("  <tag>中文字幕</tag>", file=code)
            try:
                for i in tag:
                    print("  <tag>" + i + "</tag>", file=code)
            except Exception:
                pass
            try:
                for i in tag:
                    print("  <genre>" + i + "</genre>", file=code)
            except Exception:
                pass
            if cn_sub == '1':
                print("  <genre>中文字幕</genre>", file=code)
            print("  <num>" + number + "</num>", file=code)
            print("  <release>" + release + "</release>", file=code)
            print("  <cover>" + cover + "</cover>", file=code)
            print("  <website>" + "https://www.javbus.com/" + number + "</website>", file=code)
            print("</movie>", file=code)
            print("[+]Written! " + path + "/" + number + ".nfo")
    except IOError as e:
        print("[-]Write Failed!")
        print(e)
        moveFailedFolder()
    except Exception as e1:
        print(e1)
        print("[-]Write Failed!")
        moveFailedFolder()
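The NFO writer above builds XML by plain string concatenation, so a title or outline containing &, < or > yields a file that Plex/Emby may refuse to parse. A minimal sketch of how the values could be escaped with the standard library (nfo_tag is a hypothetical helper, not part of this diff):

# Hypothetical helper, not part of this diff: escape metadata before it is
# concatenated into the NFO so the output stays well-formed XML.
from xml.sax.saxutils import escape

def nfo_tag(name, value, indent='  '):
    return indent + '<' + name + '>' + escape(str(value)) + '</' + name + '>'

print(nfo_tag('title', 'A & B'))   # '  <title>A &amp; B</title>'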
def cutImage():  # crop the poster out of the downloaded cover
    if option == 'plex':
        if imagecut == 1:
            try:
                img = Image.open(path + '/fanart.jpg')
                w = img.width
                h = img.height
                img2 = img.crop((w / 1.9, 0, w, h))  # keep the right part of the sleeve scan
                img2.save(path + '/poster.png')
            except Exception:
                print('[-]Cover cut failed!')
        elif imagecut == 0:
            img = Image.open(path + '/fanart.jpg')
            w = img.width
            h = img.height
            img.save(path + '/poster.png')
    elif option == 'emby':
        if imagecut == 1:
            try:
                img = Image.open(path + '/' + number + '.jpg')
                w = img.width
                h = img.height
                img2 = img.crop((w / 1.9, 0, w, h))
                img2.save(path + '/' + number + '.png')
            except Exception:
                print('[-]Cover cut failed!')
        elif imagecut == 0:
            img = Image.open(path + '/' + number + '.jpg')
            w = img.width
            h = img.height
            img.save(path + '/' + number + '.png')
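cutImage() keeps everything to the right of x = w / 1.9, i.e. roughly the right 47% of the downloaded cover; on the two-page sleeve scans JavBus serves, that right portion is the front cover. A quick worked check with Pillow (the 800x538 dimensions are illustrative only):

from PIL import Image

img = Image.new('RGB', (800, 538))   # stand-in for a downloaded fanart.jpg
w, h = img.size
left = int(w / 1.9)                  # 421 - the same split point cutImage() uses
poster = img.crop((left, 0, w, h))
print(poster.size)                   # (379, 538): the right ~47% of the scan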
def pasteFileToFolder(filepath, path):  # move the movie file into its folder
    global houzhui
    houzhui = str(re.search('[.](AVI|RMVB|WMV|MOV|MP4|MKV|FLV|TS|avi|rmvb|wmv|mov|mp4|mkv|flv|ts)$', filepath).group())  # file extension
    try:
        os.rename(filepath, path + '/' + number + houzhui)
    except FileExistsError:
        print('[-]File Exists! Please check your movie!')
        print('[-]move to the root folder of the program.')
        os._exit(0)
    except PermissionError:
        print('[-]Error! Please run as administrator!')
        os._exit(0)
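Note that os.rename() only succeeds when source and destination sit on the same filesystem; moving to another drive raises OSError. A hedged sketch of a fallback (move_file is illustrative, not what this commit ships):

import os
import shutil

def move_file(src, dst):
    # Cheap same-filesystem rename first; fall back to a real copy+delete
    # when the destination is on another drive.
    try:
        os.rename(src, dst)
    except OSError:
        shutil.move(src, dst)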
def renameJpgToBackdrop_copy():
    if option == 'plex':
        shutil.copy(path + '/fanart.jpg', path + '/Backdrop.jpg')
        shutil.copy(path + '/poster.png', path + '/thumb.png')
    if option == 'emby':
        shutil.copy(path + '/' + number + '.jpg', path + '/Backdrop.jpg')
def renameBackdropToJpg_copy():
    if option == 'plex':
        shutil.copy(path + '/fanart.jpg', path + '/Backdrop.jpg')
        shutil.copy(path + '/poster.png', path + '/thumb.png')
    if option == 'emby':
        shutil.copy(path + '/Backdrop.jpg', path + '/' + number + '.jpg')
        print('[+]Image Downloaded!', path + '/' + number + '.jpg')
def get_part(filepath):  # extract the '-CD<n>' token from a multi-part filename
    try:
        if re.search('-CD\d+', filepath):
            return re.findall('-CD\d+', filepath)[0]
    except Exception:
        print("[-]Failed! Please rename the file and try again!")
        moveFailedFolder()
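For reference, the '-CD<n>' regex behaves like this on typical names (standalone demo of the same pattern):

import re

for name in ('ABC-123-CD1.mkv', 'ABC-123-CD12.mp4', 'ABC-123.mp4'):
    m = re.search(r'-CD\d+', name)
    print(name, '->', m.group() if m else None)
# ABC-123-CD1.mkv -> -CD1
# ABC-123-CD12.mp4 -> -CD12
# ABC-123.mp4 -> None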
def debug_mode():
    try:
        if config['debug_mode']['switch'] == '1':
            print('[+] ---Debug info---')
            for i, v in json_data.items():
                if i == 'outline':
                    print('[+]  -', i, ':', len(v), 'characters')
                    continue
                if i == 'actor_photo' or i == 'year':
                    continue
                print('[+]  -', i, ':', v)
            print('[+] ---Debug info---')
    except Exception:
        pass
if __name__ == '__main__':
    filepath = argparse_get_file()[0]  # path of the movie file
    if '-CD' in filepath or '-cd' in filepath:
        multi_part = 1
        part = get_part(filepath)
    if '-c.' in filepath or '-C.' in filepath or '中文' in filepath or '字幕' in filepath:
        cn_sub = '1'
    if argparse_get_file()[1] == '':  # no ID passed on the command line, so parse it from the filename
        try:
            number = str(re.findall(r'(.+?)\.', str(re.search('([^<>/\\\\|:""\\*\\?]+)\\.\\w+$', filepath).group()))).strip("['']").replace('_', '-')
            print("[!]Making Data for [" + number + "]")
        except Exception:
            print("[-]Failed! Please rename the file and try again!")
            moveFailedFolder()
    else:
        number = argparse_get_file()[1]
    CreatFailedFolder()
    getDataFromJSON(number)  # fetch the metadata for this ID
    debug_mode()
    creatFolder()  # create the destination folder
    if program_mode == '1':
        if part == '-CD1' or multi_part == 0:
            smallCoverCheck()
            imageDownload(filepath)  # creatFolder returns the folder for this ID
            if multi_part == 1:
                number += part
            PrintFiles(filepath)  # write the NFO file
            cutImage()  # crop the cover
            renameJpgToBackdrop_copy()
        else:
            number += part
            renameBackdropToJpg_copy()
        pasteFileToFolder(filepath, path)  # move the movie file
    elif program_mode == '2':
        pasteFileToFolder(filepath, path)  # move the movie file
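The ID is recovered from the filename by taking the basename, cutting the extension, and normalising underscores to dashes. A standalone trace of those two regexes (the sample path is made up):

import re

filepath = 'JAV_output/SSNI-152_4K.mp4'                     # made-up example
base = re.search('([^<>/\\\\|:""\\*\\?]+)\\.\\w+$', filepath).group()
print(base)                                                  # SSNI-152_4K.mp4
number = str(re.findall(r'(.+?)\.', base)).strip("['']").replace('_', '-')
print(number)                                                # SSNI-152-4K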

fc2fans_club.py Normal file → Executable file

@@ -4,58 +4,72 @@ import json
import ADC_function
def getTitle(htmlcode):  # get the title
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[1]/h2/text()')).strip(" ['']")
    return result
def getActor(htmlcode):
    try:
        html = etree.fromstring(htmlcode, etree.HTMLParser())
        result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[2]/dl/dd[5]/a/text()')).strip(" ['']")
        return result
    except Exception:
        return ''
def getStudio(htmlcode):  # get the seller
    try:
        html = etree.fromstring(htmlcode, etree.HTMLParser())
        result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[2]/dl/dd[5]/a/text()')).strip(" ['']")
        return result
    except Exception:
        return ''
def getNum(htmlcode):  # get the ID
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[1]/span[2]/text()')).strip(" ['']")
    return result
def getRelease(htmlcode2):  # get the release date
    html = etree.fromstring(htmlcode2, etree.HTMLParser())
    result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[2]/dl/dd[4]/text()')).strip(" ['']")
    return result
def getCover(htmlcode2):  # get the cover URL
    html = etree.fromstring(htmlcode2, etree.HTMLParser())
    result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[1]/div/div[1]/a/img/@src')).strip(" ['']")
    return 'http:' + result
def getOutline(htmlcode2):  # get the plot outline
    html = etree.fromstring(htmlcode2, etree.HTMLParser())
    result = str(html.xpath('//*[@id="container"]/div[1]/div/article/section[4]/p/text()')).strip(" ['']").replace("\\n", '', 10000).replace("'", '', 10000).replace(', ,', '').strip(' ').replace('。,', ',')
    return result
def getTag(htmlcode):  # get the tags
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = html.xpath('//*[@id="container"]/div[1]/div/article/section[6]/ul/li/a/text()')
    return result
def getYear(release):
    try:
        result = re.search('\d{4}', release).group()
        return result
    except Exception:
        return ''
def main(number):
    number = number.replace('PPV', '').replace('ppv', '').strip('fc2_').strip('fc2-').strip('FC2_').strip('FC2-').strip('ppv-').strip('PPV-').replace('fc2ppv-', '').replace('FC2PPV-', '')
    htmlcode2 = ADC_function.get_html('http://adult.contents.fc2.com/article_search.php?id=' + str(number).lstrip("FC2-").lstrip("fc2-").lstrip("fc2_") + '')
    #htmlcode = ADC_function.get_html('http://fc2fans.club/html/FC2-' + number + '.html')
    dic = {
        'title': getTitle(htmlcode2),
        'studio': getStudio(htmlcode2),
        'year': getYear(getRelease(htmlcode2)),
        'outline': getOutline(htmlcode2),
        'runtime': '',  # runtime is not exposed on the search page
        'director': getStudio(htmlcode2),
        'actor': getStudio(htmlcode2),
        'release': getRelease(htmlcode2),
        'number': 'FC2-' + number,
        'cover': getCover(htmlcode2),
        'imagecut': 0,
        'tag': getTag(htmlcode2),
        'actor_photo': '',
        'website': 'http://adult.contents.fc2.com/article_search.php?id=' + number,
        'source': 'fc2fans_club.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
    return js
#print(main('1145465'))
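main() scrubs the FC2/PPV prefix with a long chain of strip()/replace() calls; note that str.strip() removes characters, not prefixes, so strip('fc2-') also eats stray 'f', 'c', '2' and '-' characters at either end. A single-regex alternative (sketch only, not what the module ships):

import re

def normalize_fc2_id(number):
    # 'FC2-1145465', 'fc2ppv-1145465' and 'FC2PPV_1145465' all become '1145465'.
    return re.sub(r'(?i)^(fc2[-_]?)?(ppv[-_]?)?', '', number)

print(normalize_fc2_id('FC2PPV-1145465'))   # 1145465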

javbus.py Normal file → Executable file

@@ -1,26 +1,29 @@
import re
from pyquery import PyQuery as pq  # need install
from lxml import etree  # need install
from bs4 import BeautifulSoup  # need install
import json
from ADC_function import *
def getActorPhoto(htmlcode):  # //*[@id="star_qdt"]/li/a/img
    soup = BeautifulSoup(htmlcode, 'lxml')
    a = soup.find_all(attrs={'class': 'star-name'})
    d = {}
    for i in a:  # one extra page request per performer to fetch the avatar
        l = i.a['href']
        t = i.get_text()
        html = etree.fromstring(get_html(l), etree.HTMLParser())
        p = str(html.xpath('//*[@id="waterfall"]/div[1]/div/div[1]/img/@src')).strip(" ['']")
        p2 = {t: p}
        d.update(p2)
    return d
def getTitle(htmlcode):  # get the title
    doc = pq(htmlcode)
    title = str(doc('div.container h3').text()).replace(' ', '-')
    try:
        title2 = re.sub('n\d+-', '', title)
        return title2
    except Exception:
        return title
def getStudio(htmlcode):  # get the maker
    html = etree.fromstring(htmlcode, etree.HTMLParser())
@@ -34,7 +37,6 @@ def getCover(htmlcode): # get the cover link
    doc = pq(htmlcode)
    image = doc('a.bigImage')
    return image.attr('href')
def getRelease(htmlcode):  # get the release date
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[2]/text()')).strip(" ['']")
@@ -62,8 +64,10 @@ def getOutline(htmlcode): # get the outline
    doc = pq(htmlcode)
    result = str(doc('tr td div.mg-b20.lh4 p.mg-b20').text())
    return result
def getSerise(htmlcode):  # get the series/label
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('/html/body/div[5]/div[1]/div[2]/p[7]/a/text()')).strip(" ['']")
    return result
def getTag(htmlcode):  # get the tags
    tag = []
    soup = BeautifulSoup(htmlcode, 'lxml')
@@ -76,31 +80,16 @@ def getTag(htmlcode): # get the tags
def main(number):
    try:
        htmlcode = get_html('https://www.javbus.com/' + number)
        try:
            dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
        except Exception:
            dww_htmlcode = ''
        dic = {
            'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))),
            'studio': getStudio(htmlcode),
            'year': str(re.search('\d{4}', getYear(htmlcode)).group()),
            'outline': getOutline(dww_htmlcode),
            'runtime': getRuntime(htmlcode),
            'director': getDirector(htmlcode),
@@ -109,20 +98,28 @@ def main(number):
            'number': getNum(htmlcode),
            'cover': getCover(htmlcode),
            'imagecut': 1,
            'tag': getTag(htmlcode),
            'label': getSerise(htmlcode),
            'actor_photo': getActorPhoto(htmlcode),
            'website': 'https://www.javbus.com/' + number,
            'source': 'javbus.py',
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
        return js
    except Exception:
        return main_uncensored(number)  # fall back to the uncensored page layout
def main_uncensored(number):
    htmlcode = get_html('https://www.javbus.com/' + number)
    dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
    if getTitle(htmlcode) == '':
        htmlcode = get_html('https://www.javbus.com/' + number.replace('-', '_'))
        dww_htmlcode = get_html("https://www.dmm.co.jp/mono/dvd/-/detail/=/cid=" + number.replace("-", ''))
    dic = {
        'title': str(re.sub('\w+-\d+-', '', getTitle(htmlcode))).replace(getNum(htmlcode) + '-', ''),
        'studio': getStudio(htmlcode),
        'year': getYear(htmlcode),
        'outline': getOutline(dww_htmlcode),
        'runtime': getRuntime(htmlcode),
        'director': getDirector(htmlcode),
        'actor': getActor(htmlcode),
@@ -130,57 +127,11 @@ def main_uncensored(number):
        'number': getNum(htmlcode),
        'cover': getCover(htmlcode),
        'tag': getTag(htmlcode),
        'label': getSerise(htmlcode),
        'imagecut': 0,
        'actor_photo': '',
        'website': 'https://www.javbus.com/' + number,
        'source': 'javbus.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
    return js
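main() tries the censored-page parser and falls back to main_uncensored() on any exception. The dispatch generalises like this (sketch only; parse_censored/parse_uncensored stand in for main/main_uncensored):

def fetch_metadata(number, parsers):
    # Try each parser in order and return the first result that scrapes
    # without raising; re-raise the last error if every layout fails.
    last_error = None
    for parse in parsers:
        try:
            return parse(number)
        except Exception as exc:
            last_error = exc
    raise last_error

# usage: fetch_metadata('IPX-292', [parse_censored, parse_uncensored])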

javdb.py Executable file

@@ -0,0 +1,139 @@
import re
from lxml import etree
import json
from bs4 import BeautifulSoup
from ADC_function import *
def getTitle(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result = str(html.xpath('/html/body/section/div/h2/strong/text()')).strip(" ['']")
    try:
        return re.sub('.*\] ', '', result.replace('/', ',').replace('\\xa0', '').replace(' : ', ''))
    except Exception:
        return re.sub('.*\] ', '', result.replace('/', ',').replace('\\xa0', ''))
def getActor(a):  # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"演員")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"演員")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').replace(",\\xa0", "").replace("'", "").replace(' ', '').replace(',,', '').lstrip(',').replace(',', ', ')
def getStudio(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"製作")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"製作")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getRuntime(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"時長")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"時長")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').rstrip('mi')
def getLabel(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"系列")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"系列")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getNum(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"番號")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"番號")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+')
def getYear(getRelease):
    try:
        result = str(re.search('\d{4}', getRelease).group())
        return result
    except Exception:
        return getRelease
def getRelease(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"時間")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"時間")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+')
def getTag(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"类别")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"类别")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').replace(",\\xa0", "").replace("'", "").replace(' ', '').replace(',,', '').lstrip(',')
def getCover(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('/html/body/section/div/div[2]/div[1]/a/img/@src')).strip(" ['']")
    if result == '':
        result = str(html.xpath('/html/body/section/div/div[3]/div[1]/a/img/@src')).strip(" ['']")
    return result
def getDirector(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//strong[contains(text(),"導演")]/../following-sibling::span/text()')).strip(" ['']")
    result2 = str(html.xpath('//strong[contains(text(),"導演")]/../following-sibling::span/a/text()')).strip(" ['']")
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getOutline(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('//*[@id="introduction"]/dd/p[1]/text()')).strip(" ['']")
    return result
def main(number):
    try:
        a = get_html('https://javdb.com/search?q=' + number + '&f=all')
        html = etree.fromstring(a, etree.HTMLParser())
        result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
        if result1 == '':
            a = get_html('https://javdb.com/search?q=' + number.replace('-', '_') + '&f=all')
            html = etree.fromstring(a, etree.HTMLParser())
            result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
        b = get_html('https://javdb1.com' + result1)
        soup = BeautifulSoup(b, 'lxml')
        a = str(soup.find(attrs={'class': 'panel'}))
        dic = {
            'actor': getActor(a),
            'title': getTitle(b).replace("\\n", '').replace(' ', '').replace(getActor(a), '').replace(getNum(a), '').replace('无码', '').replace('有码', '').lstrip(' '),
            'studio': getStudio(a),
            'outline': getOutline(a),
            'runtime': getRuntime(a),
            'director': getDirector(a),
            'release': getRelease(a),
            'number': getNum(a),
            'cover': getCover(b),
            'imagecut': 0,
            'tag': getTag(a),
            'label': getLabel(a),
            'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group())
            'actor_photo': '',
            'website': 'https://javdb1.com' + result1,
            'source': 'javdb.py',
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
        return js
    except Exception:  # the javdb1.com mirror failed, so retry on javdb.com
        a = get_html('https://javdb.com/search?q=' + number + '&f=all')
        html = etree.fromstring(a, etree.HTMLParser())
        result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
        if result1 == '' or result1 == 'null':
            a = get_html('https://javdb.com/search?q=' + number.replace('-', '_') + '&f=all')
            html = etree.fromstring(a, etree.HTMLParser())
            result1 = str(html.xpath('//*[@id="videos"]/div/div/a/@href')).strip(" ['']")
        b = get_html('https://javdb.com' + result1)
        soup = BeautifulSoup(b, 'lxml')
        a = str(soup.find(attrs={'class': 'panel'}))
        dic = {
            'actor': getActor(a),
            'title': getTitle(b).replace("\\n", '').replace(' ', '').replace(getActor(a), '').replace(getNum(a), '').replace('无码', '').replace('有码', '').lstrip(' '),
            'studio': getStudio(a),
            'outline': getOutline(a),
            'runtime': getRuntime(a),
            'director': getDirector(a),
            'release': getRelease(a),
            'number': getNum(a),
            'cover': getCover(b),
            'imagecut': 0,
            'tag': getTag(a),
            'label': getLabel(a),
            'year': getYear(getRelease(a)),
            'actor_photo': '',
            'website': 'https://javdb.com' + result1,
            'source': 'javdb.py',
        }
        js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
        return js
#print(main('061519-861'))
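The try and except branches of main() duplicate the whole dict; only the mirror host and the detail-page fetch differ. A hedged refactor sketch that keeps one copy (fetch_detail is hypothetical; get_html comes from ADC_function as above):

MIRRORS = ('https://javdb1.com', 'https://javdb.com')

def fetch_detail(result1):
    # Return the first mirror that serves the detail page, plus its HTML.
    for host in MIRRORS:
        try:
            return host, get_html(host + result1)
        except Exception:
            continue
    raise RuntimeError('all javdb mirrors failed')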


@@ -1,6 +0,0 @@
[proxy]
proxy=127.0.0.1:1080
[Name_Rule]
location_rule='JAV_output/'+actor+'/'+title
naming_rule=number


@@ -1,2 +0,0 @@
pyinstaller --onefile AV_Data_Capture.py
pyinstaller --onefile core.py --hidden-import ADC_function.py --hidden-import fc2fans_club.py --hidden-import javbus.py --hidden-import siro.py

siro.py Normal file → Executable file

@@ -1,81 +1,104 @@
import re
from lxml import etree
import json
from bs4 import BeautifulSoup
from ADC_function import *
def getTitle(a):
    try:
        html = etree.fromstring(a, etree.HTMLParser())
        result = str(html.xpath('//*[@id="center_column"]/div[2]/h1/text()')).strip(" ['']")
        return result.replace('/', ',')
    except Exception:
        return ''
def getActor(a):  # //*[@id="center_column"]/div[2]/div[1]/div/table/tbody/tr[1]/td/text()
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"出演:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"出演:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '').replace('/', ',')
def getStudio(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getRuntime(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"収録時間:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"収録時間:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').rstrip('mi')
def getLabel(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"シリーズ:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getNum(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"品番:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"品番:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+')
def getYear(getRelease):
    try:
        result = str(re.search('\d{4}', getRelease).group())
        return result
    except Exception:
        return getRelease
def getRelease(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"配信開始日:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"配信開始日:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+')
def getTag(a):
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"ジャンル:")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').replace("', '\\n", ",").replace("', '", "").replace('"', '')
def getCover(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('//*[@id="center_column"]/div[2]/div[1]/div/div/h2/img/@src')).strip(" ['']")
    return result
def getDirector(a):  # note: this reads the シリーズ (series) row, not a director field
    html = etree.fromstring(a, etree.HTMLParser())
    result1 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/a/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    result2 = str(html.xpath('//th[contains(text(),"シリーズ")]/../td/text()')).strip(" ['']").strip('\\n ').strip('\\n')
    return str(result1 + result2).strip('+').replace("', '", '').replace('"', '')
def getOutline(htmlcode):
    html = etree.fromstring(htmlcode, etree.HTMLParser())
    result = str(html.xpath('//*[@id="introduction"]/dd/p[1]/text()')).strip(" ['']")
    return result
def main(number2):
    number = number2.upper()
    htmlcode = get_html('https://www.mgstage.com/product/product_detail/' + str(number) + '/', cookies={'adc': '1'})  # the adc cookie passes the age check
    soup = BeautifulSoup(htmlcode, 'lxml')
    a = str(soup.find(attrs={'class': 'detail_data'})).replace('\n ', '').replace(' ', '')
    dic = {
        'title': getTitle(htmlcode).replace("\\n", '').replace(' ', ''),
        'studio': getStudio(a),
        'outline': getOutline(htmlcode),
        'runtime': getRuntime(a),
        'director': getDirector(a),
        'actor': getActor(a),
        'release': getRelease(a),
        'number': getNum(a),
        'cover': getCover(htmlcode),
        'imagecut': 0,
        'tag': getTag(a),
        'label': getLabel(a),
        'year': getYear(getRelease(a)),  # str(re.search('\d{4}',getRelease(a)).group())
        'actor_photo': '',
        'website': 'https://www.mgstage.com/product/product_detail/' + str(number) + '/',
        'source': 'siro.py',
    }
    js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'), )  # .encode('UTF-8')
    return js
#print(main('300maan-373'))
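Every getXxx() helper above repeats the same th/td table lookup with a different Japanese header. The pattern factors into one function (sketch only; label is the header text such as 品番: or 配信開始日:):

from lxml import etree

def table_value(a, label):
    # Read both the linked and the plain text cells next to a <th> header.
    html = etree.fromstring(a, etree.HTMLParser())
    linked = html.xpath('//th[contains(text(),"%s")]/../td/a/text()' % label)
    plain = html.xpath('//th[contains(text(),"%s")]/../td/text()' % label)
    return ''.join(linked + plain).strip()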

update_check.json Normal file

@@ -0,0 +1,5 @@
{
    "version": "1.3",
    "version_show": "1.3",
    "download": "https://github.com/wenead99/AV_Data_Capture/releases"
}
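This file is what the update check downloads and compares against the running version. A minimal sketch of the client side (the raw-file URL is an assumption, and a plain string comparison stands in for real version ordering):

import json
from urllib.request import urlopen

LOCAL_VERSION = '1.3'
URL = 'https://raw.githubusercontent.com/wenead99/AV_Data_Capture/master/update_check.json'  # assumed raw-file location

data = json.load(urlopen(URL))
if data['version'] != LOCAL_VERSION:
    print('[*] New version ' + data['version_show'] + ' available: ' + data['download'])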