diff --git a/Movie_Data_Capture.py b/Movie_Data_Capture.py
index 56182c4..d4a3df3 100644
--- a/Movie_Data_Capture.py
+++ b/Movie_Data_Capture.py
@@ -104,9 +104,9 @@ is performed. It may help you correct wrong numbers before real job.""")
     set_str_or_none("common:source_folder", args.path)
     set_bool_or_none("common:auto_exit", args.auto_exit)
     set_natural_number_or_none("common:nfo_skip_days", args.days)
-    set_natural_number_or_none("common:stop_counter", args.cnt)
+    set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt)
     set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list)
-    set_str_or_none("common:rerun_delay", args.delaytm)
+    set_str_or_none("advenced_sleep:rerun_delay", args.delaytm)
     set_str_or_none("priority:website", args.site)
     if isinstance(args.dnimg, bool) and args.dnimg:
         conf.set_override("common:download_only_missing_images=0")
@@ -119,7 +119,7 @@ is performed. It may help you correct wrong numbers before real job.""")
     if conf.main_mode() == 3:
         no_net_op = args.no_network_operation
         if no_net_op:
-            conf.set_override("common:stop_counter=0;rerun_delay=0s;face:aways_imagecut=1")
+            conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1")
 
     return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.specified_source, args.specified_url
 
@@ -681,7 +681,7 @@ def period(delta, pattern):
 
 
 if __name__ == '__main__':
-    version = '6.5.1'
+    version = '6.5.2'
     urllib3.disable_warnings()  # Ignore http proxy warning
     app_start = time.time()
diff --git a/core.py b/core.py
index e2dde98..dba98da 100644
--- a/core.py
+++ b/core.py
@@ -347,11 +347,11 @@ def print_files(path, leak_word, c_word, naming_rule, part, cn_sub, json_data, f
         print("<movie>", file=code)
         if not config.getInstance().jellyfin():
             print("  <title><![CDATA[" + naming_rule + "]]></title>", file=code)
-            print("  <originaltitle><![CDATA[" + naming_rule + "]]></originaltitle>", file=code)
+            print("  <originaltitle><![CDATA[" + json_data['original_naming_rule'] + "]]></originaltitle>", file=code)
             print("  <sorttitle><![CDATA[" + naming_rule + "]]></sorttitle>", file=code)
         else:
             print("  <title>" + naming_rule + "</title>", file=code)
-            print("  <originaltitle>" + naming_rule + "</originaltitle>", file=code)
+            print("  <originaltitle>" + json_data['original_naming_rule'] + "</originaltitle>", file=code)
             print("  <sorttitle>" + naming_rule + "</sorttitle>", file=code)
         print("  <customrating>JP-18+</customrating>", file=code)
         print("  <mpaa>JP-18+</mpaa>", file=code)
@@ -633,6 +633,8 @@ def paste_file_to_folder_mode2(filepath, path, multi_part, number, part, leak_wo
         create_softlink = False
         if link_mode not in (1, 2):
             shutil.move(filepath, targetpath)
+            print("[!]Move => ", path)
+            return
         elif link_mode == 2:
             try:
                 os.link(filepath, targetpath, follow_symlinks=False)
@@ -644,16 +646,13 @@ def paste_file_to_folder_mode2(filepath, path, multi_part, number, part, leak_wo
                 os.symlink(filerelpath, targetpath)
             except:
                 os.symlink(str(filepath_obj.resolve()), targetpath)
-        return
+        print("[!]Link => ", path)
     except FileExistsError as fee:
         print(f'[-]FileExistsError: {fee}')
-        return
     except PermissionError:
         print('[-]Error! Please run as administrator!')
-        return
     except OSError as oserr:
         print(f'[-]OS Error errno {oserr.errno}')
-        return
 
 
 def linkImage(path, number, part, leak_word, c_word, hack_word, ext):
@@ -971,13 +970,9 @@ def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=N
             path = create_folder(json_data)
         # Move the file
         paste_file_to_folder_mode2(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)
+
         # Move subtitles
-        move_status = move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)
-        if move_status:
-            cn_sub = True
-        if conf.is_watermark():
-            add_mark(os.path.join(path, poster_path), os.path.join(path, thumb_path), cn_sub, leak, uncensored, hack,
-                     _4k)
+        move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)
 
     elif conf.main_mode() == 3:
         path = str(Path(movie_path).parent)
@@ -1030,5 +1025,5 @@ def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=N
 
     # Finally, write the .nfo metadata file; successful creation of the .nfo marks the task as done
     print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path,
-                tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, fanart_path, poster_path,
-                _4k, thumb_path)
+                tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, _4k, fanart_path, poster_path,
+                thumb_path)
diff --git a/scraper.py b/scraper.py
index 40a78bb..7e54f52 100644
--- a/scraper.py
+++ b/scraper.py
@@ -269,14 +269,22 @@ def get_data_from_json(
         pass
 
     naming_rule = ""
+    original_naming_rule = ""
     for i in conf.naming_rule().split("+"):
         if i not in json_data:
             naming_rule += i.strip("'").strip('"')
+            original_naming_rule += i.strip("'").strip('"')
         else:
             item = json_data.get(i)
             naming_rule += item if type(item) is not list else "&".join(item)
+            # PATCH: handle the case where [title] has been translated. Downstream,
+            # the NFO file's original_name used to reuse naming_rule directly, so
+            # original_name was not the original title.
+            # Ideally naming_rule and original_naming_rule should both be handled at the translation step.
+            if i == 'title':
+                item = json_data.get('original_title')
+            original_naming_rule += item if type(item) is not list else "&".join(item)
     json_data['naming_rule'] = naming_rule
+    json_data['original_naming_rule'] = original_naming_rule
 
     return json_data
diff --git a/scrapinglib/fanza.py b/scrapinglib/fanza.py
index e8b1917..e9b25c3 100644
--- a/scrapinglib/fanza.py
+++ b/scrapinglib/fanza.py
@@ -49,15 +49,13 @@ class Fanza(Parser):
             self.detailurl = url + fanza_search_number
             url = "https://www.dmm.co.jp/age_check/=/declared=yes/?"+ urlencode({"rurl": self.detailurl})
             self.htmlcode = self.getHtml(url)
-            if "Sorry! This content is not available in your region." in self.htmlcode:
-                continue
-            if self.htmlcode != 404:
+            if self.htmlcode != 404 \
+                    and 'Sorry! This content is not available in your region.' not in self.htmlcode:
                 self.htmltree = etree.HTML(self.htmlcode)
-                break
-        if self.htmlcode == 404:
-            return 404
-        result = self.dictformat(self.htmltree)
-        return result
+                if self.htmltree is not None:
+                    result = self.dictformat(self.htmltree)
+                    return result
+        return 404
 
     def getNum(self, htmltree):
         # for some old page, the input number does not match the page
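
For context, a minimal standalone sketch (not part of the patch) of the naming-rule split that scraper.py performs after this change. NAMING_RULE and sample_json_data are hypothetical stand-ins for conf.naming_rule() and a real scraped json_data dict; the real code reads both from config and scraper output.

    # Sketch of the new naming-rule handling; stand-ins are hypothetical.
    NAMING_RULE = "number+'-'+title"

    sample_json_data = {
        'number': 'ABC-123',
        'title': 'Translated Title',        # possibly rewritten by the translator
        'original_title': '原題のタイトル',    # as scraped from the site
    }

    naming_rule = ""
    original_naming_rule = ""
    for i in NAMING_RULE.split("+"):
        if i not in sample_json_data:
            # Literal fragments such as '-' go into both rules unchanged.
            naming_rule += i.strip("'").strip('"')
            original_naming_rule += i.strip("'").strip('"')
        else:
            item = sample_json_data.get(i)
            naming_rule += item if type(item) is not list else "&".join(item)
            # Only the title can have been translated, so swap in the
            # untranslated value before extending original_naming_rule.
            if i == 'title':
                item = sample_json_data.get('original_title')
            original_naming_rule += item if type(item) is not list else "&".join(item)

    print(naming_rule)           # ABC-123-Translated Title
    print(original_naming_rule)  # ABC-123-原題のタイトル

core.py then writes naming_rule into the NFO's <title>/<sorttitle> and the new original_naming_rule into <originaltitle>, so Kodi/Jellyfin show the untranslated title where expected.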