From ab35815fc5af62b5356c33ca597d3597c4f021b8 Mon Sep 17 00:00:00 2001 From: dusk Date: Sun, 18 Feb 2024 17:11:31 +0100 Subject: [PATCH] WIP 18/2/24 --- src/flow.py | 44 +++++++++++++++++++++++++++++ src/main.py | 2 +- src/orig.py | 80 ++++++++++++++++++++++++++++++++++++++++++--------- src/parser.py | 37 ++++++++++++++---------- 4 files changed, 133 insertions(+), 30 deletions(-) create mode 100644 src/flow.py diff --git a/src/flow.py b/src/flow.py new file mode 100644 index 0000000..c45e745 --- /dev/null +++ b/src/flow.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +onik = { + "gamestart" : [ + 'onik_000.txt', + 'onik_001.txt', + 'onik_002.txt', + 'onik_003.txt', + 'onik_004.txt', + 'onik_005.txt', + 'onik_009.txt', + 'onik_009_02.txt', + 'onik_010.txt', + 'onik_011.txt', + 'onik_012.txt', + 'onik_013.txt', + 'onik_014.txt', + 'onik_014_02.txt', + 'onik_015.txt', + 'onik_015_02.txt', + 'onik_015_03.txt', + ], + "Opening" : [ 'onik_op.txt' ], + "Sub_Tips_001" : [ 'onik_tips_01.txt' ], + "Sub_Tips_002" : [ 'onik_tips_02.txt' ], + "Sub_Tips_003" : [ 'onik_tips_03.txt' ], + "Sub_Tips_004" : [ 'onik_tips_04.txt' ], + "Sub_Tips_005" : [ 'onik_tips_05.txt' ], + "Sub_Tips_006" : [ 'onik_tips_06.txt' ], + "Sub_Tips_007" : [ 'onik_tips_07.txt' ], + "Sub_Tips_008" : [ 'onik_tips_08.txt' ], + "Sub_Tips_009" : [ 'onik_tips_09.txt' ], + "Sub_Tips_010" : [ 'onik_tips_10.txt' ], + "Sub_Tips_011" : [ 'onik_tips_11.txt' ], + "Sub_Tips_012" : [ 'onik_tips_12.txt' ], + "Sub_Tips_013" : [ 'onik_tips_13.txt' ], + "Sub_Tips_014" : [ 'onik_tips_14.txt' ], + "Sub_Tips_015" : [ 'onik_tips_15.txt' ], + "Sub_Tips_016" : [ 'onik_tips_16.txt' ], + "Sub_Tips_017" : [ 'onik_tips_17.txt' ], + "Sub_Tips_018" : [ 'onik_tips_18.txt' ], + "Sub_Tips_019" : [ 'onik_tips_19.txt' ], + "Sub_Tips_020" : [ 'onik_tips_20.txt' ], +} diff --git a/src/main.py b/src/main.py index 4250090..e3c634d 100755 --- a/src/main.py +++ 
b/src/main.py @@ -4,7 +4,7 @@ import orig def main(): - orig.write_translated() + orig.process_sections() if __name__ == "__main__": diff --git a/src/orig.py b/src/orig.py index c554ac5..df9ffdd 100755 --- a/src/orig.py +++ b/src/orig.py @@ -6,6 +6,7 @@ from unidecode import unidecode import config import parser +import flow japanese_ranges = [ (0x4E00, 0x9FFF), # Kanji @@ -16,32 +17,83 @@ japanese_ranges = [ ] -def write_translated(): - translation = parser.parse_to_tokens() - +def process_sections(): output_filepath = os.path.join(config.get('output_path'), 'out.txt') outfile = open(output_filepath, 'w', encoding='shift_jisx0213') + origfile = open(config.get('original_path'), 'r', encoding='shift_jisx0213') - with open(config.get('original_path'), 'r', encoding='shift_jisx0213') as file: - for line in file: + sections = dict() + + for line in origfile: + if line.startswith('*'): + section_name = line[1:].split(' ', 1)[0].replace('\n','') + + outfile.write(line) + + if section_name in flow.onik: + print("entering", section_name) + write_translated( + outfile, + origfile, + flow.onik[section_name], + ) + else: + outfile.write(line) + + outfile.close() + origfile.close() + + +# Given a set of translation files, the original file and the output file +# replace the japanese lines with the translated ones in a given section. +def write_translated(outfile, origfile, translation_file_paths): + for transfilepath in translation_file_paths: + print(f'- reading "{transfilepath}"') + structure = parser.parse_to_structure(transfilepath) + + for i, line in enumerate(origfile): found = False for start, end in japanese_ranges: if start <= ord(line[0]) <= end: found = True - if found and len(translation) > 0: - # outfile.write(unidecode(translation.pop(0)[1]).replace("\\","¥")) - amount = line.count("@") + line.count("¥") + if found: + # The amount of lines may not coincide because the original + # might have one line for what we have multiple lines. 
Count + # the number of appearances of the end-of-command symbols + # (@ and ¥) to determine how many of the translated lines is + # equivalent to the given original line. + amount = line.count("@") + line.count("¥") + line.count('/') outfile.write('`') - for _ in range(amount): - outfile.write( - unidecode(translation.pop(0)[1]).replace("\\", "¥")) + _printed_line = "" + while True: + if amount <= 0: + break + + if structure[0][3] == 'Line_ContinueAfterTyping': + amount += 1 + + _printed_line += structure[0][2] + outfile.write( + unidecode(structure.pop(0)[1]).replace("\\", "¥") + ) + + amount -= 1 + + outfile.write('\n') + + print("\n-", transfilepath) + print(">", _printed_line) + print("<", line, end='') + + + # Used up all of the structures, this chapter has ended. + # Got to the next one + if len(structure) <= 0: + break - if amount > 0: - outfile.write('\n') else: outfile.write(line) - outfile.close() diff --git a/src/parser.py b/src/parser.py index 0ce8e76..e839a65 100755 --- a/src/parser.py +++ b/src/parser.py @@ -85,41 +85,48 @@ def get_functions_from_file(filepath: str): return tokens -def parse_to_tokens(): +def parse_to_structure(filename: str): scripts_path = config.get("scripts_path") tokens = get_functions_from_file( - os.path.join(scripts_path, "onik_000.txt") + os.path.join(scripts_path, filename) ) structure = [] for token in tokens: - if token[0] == "OutputLine": - dialogue = token[4][1:-1].replace('\\', '') + function_name = token[0] - if token[-1] == "Line_Normal": + if function_name == "OutputLine": + text_jp = token[2][1:-1] + text_en = token[4][1:-1] + line_type = token[-1] + + dialogue = text_en.replace('\\', '') + + if line_type == "Line_Normal": dialogue += "\\" - elif token[-1] == "Line_WaitForInput": + elif line_type == "Line_WaitForInput": dialogue += "@" - elif token[-1] == "Line_ContinueAfterTyping": + elif line_type == "Line_ContinueAfterTyping": pass else: raise Exception("Unhandled output termination") - 
structure.append(["OutputLine", dialogue]) + structure.append([ + "OutputLine", + dialogue, + text_jp, + line_type + ]) elif ( - token[0] == "OutputLineAll" + function_name == "OutputLineAll" and "Line_ContinueAfterTyping" == token[-1] and "\\n" in token[2] ): - count = token[2].count("\\n") + pass + #count = token[2].count("\\n") # structure.append(["LineBreak", count]) - # for coso in structure: - # if coso[0] == "OutputLine": - # print(coso[1], end="") - # elif coso[0] == "LineBreak": - # print(":".join(["br"] * coso[1])) return structure