fix variable name consistency error

change variable names to be human-friendly
add type hinting
PEP8 formatting
This commit is contained in:
naughtyGitCat
2022-09-30 16:43:15 +08:00
parent 50ef41ee50
commit 78298495ac
3 changed files with 30 additions and 18 deletions

View File

@@ -1,11 +1,12 @@
# build-in lib
import json
import secrets
import typing
from pathlib import Path
# third party lib
import opencc
from lxml import etree
# project wide definitions
import config
from ADC_function import (translate,
@@ -17,9 +18,17 @@ from ADC_function import (translate,
from scrapinglib.api import search
def get_data_from_json(file_number, oCC, specified_source, specified_url):
def get_data_from_json(
file_number: str,
open_cc: opencc.OpenCC,
specified_source: str, specified_url: str) -> typing.Optional[dict]:
"""
iterate through all services and fetch the data 从JSON返回元数据
iterate through all services and fetch the data 从网站上查询片名解析JSON返回元数据
:param file_number: 影片名称
:param open_cc: 简繁转换器
:param specified_source: 指定的媒体数据源
:param specified_url: 指定的数据查询地址, 目前未使用
:return 给定影片名称的具体信息
"""
actor_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_actor.xml'))
@@ -36,12 +45,14 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
if config_proxy.enable:
proxies = config_proxy.proxies()
# javdb website logic
# javdb have suffix
javdb_sites = conf.javdb_sites().split(',')
for i in javdb_sites:
javdb_sites[javdb_sites.index(i)] = "javdb" + i
javdb_sites.append("javdb")
# 不加载过期的cookiejavdb登录界面显示为7天免登录故假定cookie有效期为7天
has_json = False
has_valid_cookie = False
for cj in javdb_sites:
javdb_site = cj
cookie_json = javdb_site + '.json'
@@ -50,12 +61,12 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
cdays = file_modification_days(cookies_filepath)
if cdays < 7:
javdb_cookies = cookies_dict
has_json = True
has_valid_cookie = True
break
elif cdays != 9999:
print(
f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')
if not has_json:
if not has_valid_cookie:
# get real random site from javdb_sites, because random is not really random when the seed value is known
javdb_site = secrets.choice(javdb_sites)
javdb_cookies = None
@@ -64,11 +75,12 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
if conf.cacert_file():
ca_cert = conf.cacert_file()
json_data = search(file_number, sources, proxies=proxies, verify=cacert,
json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,
dbsite=javdb_site, dbcookies=javdb_cookies,
morestoryline=conf.is_storyline(),
specifiedSource=specified_source, specifiedUrl=specified_url,
debug=conf.debug())
# Return if data not found in all sources
if not json_data:
print('[-]Movie Number not found!')
@@ -190,7 +202,7 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
if len(t):
json_data[translate_value] = special_characters_replacement(t)
if oCC:
if open_cc:
cc_vars = conf.cc_convert_vars().split(",")
ccm = conf.cc_convert_mode()
@@ -223,8 +235,8 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
json_data['actor_list'] = convert_list(actor_mapping_data, "jp", json_data['actor_list'])
json_data['actor'] = convert(actor_mapping_data, "jp", json_data['actor'])
except:
json_data['actor_list'] = [oCC.convert(aa) for aa in json_data['actor_list']]
json_data['actor'] = oCC.convert(json_data['actor'])
json_data['actor_list'] = [open_cc.convert(aa) for aa in json_data['actor_list']]
json_data['actor'] = open_cc.convert(json_data['actor'])
elif cc == "tag":
try:
if ccm == 1:
@@ -237,7 +249,7 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
json_data[cc] = convert_list(info_mapping_data, "jp", json_data[cc])
json_data[cc] = delete_all_elements_in_list("删除", json_data[cc])
except:
json_data[cc] = [oCC.convert(t) for t in json_data[cc]]
json_data[cc] = [open_cc.convert(t) for t in json_data[cc]]
else:
try:
if ccm == 1:
@@ -250,7 +262,7 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
json_data[cc] = convert(info_mapping_data, "jp", json_data[cc])
json_data[cc] = delete_all_elements_in_str("删除", json_data[cc])
except IndexError:
json_data[cc] = oCC.convert(json_data[cc])
json_data[cc] = open_cc.convert(json_data[cc])
except:
pass