add type hinting
PEP8 formatting
ADC_function.py
@@ -350,7 +350,7 @@ def translate(
     return trans_result
 
 
-def load_cookies(cookie_json_filename: str):
+def load_cookies(cookie_json_filename: str) -> typing.Tuple[typing.Optional[dict], typing.Optional[str]]:
     """
     Load cookies, used to access members-only (non-guest) content
     """
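
The added annotation says load_cookies returns a pair in which either slot may be None. Below is a minimal sketch of a body consistent with that signature; only the signature and docstring appear in the hunk, so the body is hypothetical:

    import json
    import typing
    from pathlib import Path

    def load_cookies(cookie_json_filename: str) -> typing.Tuple[typing.Optional[dict], typing.Optional[str]]:
        # Hypothetical body for illustration; only the signature is taken from the diff.
        path = Path(cookie_json_filename)
        if not path.is_file():
            return None, None  # both slots can be None, hence Tuple[Optional[dict], Optional[str]]
        cookies = json.loads(path.read_text(encoding='utf-8'))
        return cookies, str(path)  # (parsed cookie dict, path it was loaded from)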
@@ -563,8 +563,10 @@ class IniProxy():
         self.proxytype = proxytype
 
     def proxies(self):
-        ''' 获得代理参数,默认http代理
-        '''
+        """
+        获得代理参数,默认http代理
+        get proxy params, use http proxy for default
+        """
         if self.address:
             if self.proxytype in self.SUPPORT_PROXY_TYPE:
                 proxies = {"http": self.proxytype + "://" + self.address,
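
For context, the dict assembled in proxies() has the shape that requests expects for its proxies keyword, one entry per URL scheme. A small sketch with a placeholder host and port:

    import requests

    # requests routes each scheme through the matching entry of this mapping
    proxies = {"http": "http://127.0.0.1:8080",
               "https": "http://127.0.0.1:8080"}
    requests.get("https://example.com", proxies=proxies, timeout=10)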
scraper.py (34 changes)
@@ -1,10 +1,19 @@
+# build-in lib
 import json
 import secrets
-import config
-from lxml import etree
 from pathlib import Path
 
-from ADC_function import delete_all_elements_in_list, delete_all_elements_in_str, file_modification_days, load_cookies, translate
+# third party lib
+from lxml import etree
+
+# project wide definitions
+import config
+from ADC_function import (translate,
+                          load_cookies,
+                          file_modification_days,
+                          delete_all_elements_in_str,
+                          delete_all_elements_in_list
+                          )
 from scrapinglib.api import search
 
@@ -22,10 +31,10 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
 
     # TODO prepare parameters
     # - clean up ADC_function, webcrawler
-    proxies = None
-    configProxy = conf.proxy()
-    if configProxy.enable:
-        proxies = configProxy.proxies()
+    proxies: dict = None
+    config_proxy = conf.proxy()
+    if config_proxy.enable:
+        proxies = config_proxy.proxies()
 
     javdb_sites = conf.javdb_sites().split(',')
     for i in javdb_sites:
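
One caveat on the new annotation: None is not a dict, so a strict checker such as mypy rejects `proxies: dict = None`; typing.Optional[dict] states the intent exactly. A minimal sketch, where build_proxies and its argument are hypothetical stand-ins for the conf.proxy() object used above:

    import typing

    def build_proxies(config_proxy) -> typing.Optional[dict]:
        # stays None unless the configuration enables a proxy
        proxies: typing.Optional[dict] = None
        if config_proxy.enable:
            proxies = config_proxy.proxies()
        return proxies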
@@ -44,16 +53,18 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
             has_json = True
             break
         elif cdays != 9999:
-            print(f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')
+            print(
+                f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')
     if not has_json:
         # get real random site from javdb_sites, because random is not really random when the seed value is known
         javdb_site = secrets.choice(javdb_sites)
         javdb_cookies = None
 
-    cacert =None
+    ca_cert = None
     if conf.cacert_file():
-        cacert = conf.cacert_file()
+        ca_cert = conf.cacert_file()
 
-    json_data = search(file_number, sources, proxies=proxies, verify=cacert,
+    json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,
                        dbsite=javdb_site, dbcookies=javdb_cookies,
                        morestoryline=conf.is_storyline(),
                        specifiedSource=specified_source, specifiedUrl=specified_url)
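
The secrets.choice call kept in this hunk draws from the OS CSPRNG, whereas random.choice uses the seedable Mersenne Twister, so its picks are reproducible by anyone who knows the seed. A small illustration with placeholder mirror names:

    import random
    import secrets

    javdb_sites = ['javdb1', 'javdb2', 'javdb3']  # placeholder mirror list

    random.seed(42)
    print(random.choice(javdb_sites))   # deterministic: same pick on every run with this seed
    print(secrets.choice(javdb_sites))  # OS entropy; not predictable from a seed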
@@ -195,6 +206,7 @@ def get_data_from_json(file_number, oCC, specified_source, specified_url):
             return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]
         else:
             raise IndexError('keyword not found')
+
     for cc in cc_vars:
         if json_data[cc] == "" or len(json_data[cc]) == 0:
             continue
@@ -51,7 +51,7 @@ def getSupportedSources(tag='adult'):
     return ','.join(sc.general_full_sources)
 
 
-class Scraping():
+class Scraping:
     """
     """
     adult_full_sources = ['javlibrary', 'javdb', 'javbus', 'airav', 'fanza', 'xcity', 'jav321',
@@ -200,6 +200,7 @@ class Scraping():
             sources = self.adult_full_sources
         else:
             sources = c_sources.split(',')
+
         def insert(sources, source):
             if source in sources:
                 sources.insert(0, sources.pop(sources.index(source)))
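
The insert helper set off by the added blank line is a move-to-front: when the requested source is present, it is popped and re-inserted at index 0 so that source is tried first. For example:

    sources = ['javbus', 'javdb', 'fanza']  # placeholder source order
    source = 'javdb'
    if source in sources:
        sources.insert(0, sources.pop(sources.index(source)))
    print(sources)  # ['javdb', 'javbus', 'fanza']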