add type hinting

PEP8 formatting
naughtyGitCat committed 2022-09-16 18:23:20 +08:00
parent daedd3071c
commit f56400a56b
4 changed files with 58 additions and 43 deletions

View File

@@ -350,7 +350,7 @@ def translate(
     return trans_result


-def load_cookies(cookie_json_filename: str):
+def load_cookies(cookie_json_filename: str) -> typing.Tuple[typing.Optional[dict], typing.Optional[str]]:
     """
     Load cookies, used to access non-guest (member-only) content

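The new return annotation reads as a pair of (cookie dict, source filename), with (None, None) when loading fails. A minimal sketch of a body consistent with that signature; the error handling and file access shown here are assumptions for illustration, not the project's actual code:

    import json
    import typing

    def load_cookies(cookie_json_filename: str) -> typing.Tuple[typing.Optional[dict], typing.Optional[str]]:
        """Load cookies used to access non-guest (member-only) content."""
        try:
            with open(cookie_json_filename, encoding='utf-8') as f:
                # return the parsed cookie mapping plus the filename it came from
                return json.load(f), cookie_json_filename
        except (OSError, json.JSONDecodeError):
            # signal "no usable cookies" with a pair of Nones
            return None, None
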
View File

@@ -563,8 +563,10 @@ class IniProxy():
         self.proxytype = proxytype

     def proxies(self):
-        ''' get proxy params, http proxy by default
-        '''
+        """
+        get proxy params, use http proxy for default
+        """
         if self.address:
             if self.proxytype in self.SUPPORT_PROXY_TYPE:
                 proxies = {"http": self.proxytype + "://" + self.address,

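For context, the dict built here is the shape the requests library accepts through its proxies= parameter. A minimal sketch of the returned mapping, assuming an illustrative address of 127.0.0.1:1080 and a proxytype of socks5:

    # illustrative values; IniProxy.proxies() substitutes its own
    # self.proxytype and self.address into this same shape
    proxies = {
        "http": "socks5://127.0.0.1:1080",
        "https": "socks5://127.0.0.1:1080",
    }
    # e.g. requests.get(url, proxies=proxies)
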
View File

@@ -1,10 +1,19 @@
+# built-in lib
 import json
 import secrets
-import config
-from lxml import etree
 from pathlib import Path
-from ADC_function import delete_all_elements_in_list, delete_all_elements_in_str, file_modification_days, load_cookies, translate
+# third party lib
+from lxml import etree
+# project wide definitions
+import config
+from ADC_function import (translate,
+                          load_cookies,
+                          file_modification_days,
+                          delete_all_elements_in_str,
+                          delete_all_elements_in_list
+                          )

 from scrapinglib.api import search
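The regrouped imports follow PEP 8's recommended order: standard library first, then third-party packages, then project-local modules. A tool like isort can maintain this ordering automatically; a small sketch of its Python API, where treating config as first-party is an explicit assumption passed as an option:

    import isort

    messy = "import config\nimport json\nfrom lxml import etree\n"
    # known_first_party tells isort that `config` is a project-local module
    print(isort.code(messy, known_first_party=["config"]))
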
@@ -22,10 +31,10 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
     # TODO prepare parameters
     # - clean up ADC_function, webcrawler
-    proxies = None
-    configProxy = conf.proxy()
-    if configProxy.enable:
-        proxies = configProxy.proxies()
+    proxies: dict = None
+    config_proxy = conf.proxy()
+    if config_proxy.enable:
+        proxies = config_proxy.proxies()

     javdb_sites = conf.javdb_sites().split(',')
     for i in javdb_sites:
@@ -44,16 +53,18 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
                 has_json = True
                 break
             elif cdays != 9999:
-                print(f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')
+                print(
+                    f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')

     if not has_json:
+        # get real random site from javdb_sites, because random is not really random when the seed value is known
         javdb_site = secrets.choice(javdb_sites)
         javdb_cookies = None
-        cacert =None
+        ca_cert = None
         if conf.cacert_file():
-            cacert = conf.cacert_file()
-        json_data = search(file_number, sources, proxies=proxies, verify=cacert,
+            ca_cert = conf.cacert_file()
+        json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,
                            dbsite=javdb_site, dbcookies=javdb_cookies,
                            morestoryline=conf.is_storyline(),
                            specifiedSource=specified_source, specifiedUrl=specified_url)
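The comment added above secrets.choice points at a real distinction: random is a seedable PRNG whose output is reproducible once the seed is known, while secrets draws from the operating system's CSPRNG and exposes no seeding interface at all. A small self-contained illustration:

    import random
    import secrets

    sites = ["site-a", "site-b", "site-c"]  # illustrative list

    # random.choice repeats itself under a known seed
    random.seed(42)
    first = random.choice(sites)
    random.seed(42)
    assert random.choice(sites) == first

    # secrets.choice cannot be seeded; each call uses fresh OS entropy
    print(secrets.choice(sites))
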
@@ -195,6 +206,7 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
             return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]
         else:
             raise IndexError('keyword not found')
+
     for cc in cc_vars:
         if json_data[cc] == "" or len(json_data[cc]) == 0:
             continue
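The xpath call in this hunk relies on lxml's XPath variable substitution: the keyword argument binds $name inside the expression, avoiding manual string interpolation of untrusted values. A minimal sketch with made-up element names and attributes:

    from lxml import etree

    mapping = etree.XML('<map><a keyword="tokyo" en="Tokyo"/></map>')
    # name='tokyo' is bound to $name by lxml, not pasted into the string
    print(mapping.xpath('a[contains(@keyword, $name)]/@en', name='tokyo'))  # ['Tokyo']
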

View File

@@ -51,7 +51,7 @@ def getSupportedSources(tag='adult'):
     return ','.join(sc.general_full_sources)

-class Scraping():
+class Scraping:
     """
     """
     adult_full_sources = ['javlibrary', 'javdb', 'javbus', 'airav', 'fanza', 'xcity', 'jav321',
@@ -200,6 +200,7 @@ class Scraping():
             sources = self.adult_full_sources
         else:
             sources = c_sources.split(',')
+
         def insert(sources, source):
             if source in sources:
                 sources.insert(0, sources.pop(sources.index(source)))
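For reference, the nested insert helper promotes a preferred source to the front of the list in place via pop plus insert. A standalone demonstration of the behavior:

    def insert(sources, source):
        # move `source` to index 0 if present; mutates the list in place
        if source in sources:
            sources.insert(0, sources.pop(sources.index(source)))

    order = ['javlibrary', 'javdb', 'javbus']
    insert(order, 'javbus')
    print(order)  # ['javbus', 'javlibrary', 'javdb']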