#!/usr/bin/env python3
|
|
|
|
import os
|
|
import sys
|
|
|
|
from unidecode import unidecode
|
|
|
|
import scripter
|
|
import config
|
|
import parser
|
|
import flow
|
|
import fix
|
|
|
|
debug_current_line = 0
|
|
|
|
def process_sections():
    """Copy the original Onikakushi script to ``out.txt``, swapping each
    section's Japanese text for its translation.

    For every section-marker line (one starting with ``*``) this looks for
    ``<output_path>/trans/<section>.csv`` or, failing that, a
    ``<output_path>/trans/<section>/`` directory of CSV files, and hands the
    sorted list of paths to write_translated().  Lines belonging to sections
    with no translation are copied through unchanged.
    """
    global debug_current_line

    output_path = config.get('output_path')
    output_filepath = os.path.join(output_path, 'out.txt')

    # The game script uses Shift JIS X 0213 on both ends; `with` guarantees
    # both handles are closed even if a later parse step raises.
    with open(output_filepath, 'w', encoding='shift_jisx0213') as outfile, \
         open(fix.open_onikakushi(), 'r', encoding='shift_jisx0213') as origfile:

        for line in origfile:
            debug_current_line += 1

            # Non-marker lines are copied through verbatim.
            if not line.startswith('*'):
                outfile.write(line)
                continue

            # Marker format: "*<section_name> ..." — keep it in the output.
            section_name = line[1:].split(' ', 1)[0].replace('\n', '')
            outfile.write(line)

            # Try to look for a {section_name}.csv file.
            # csv_paths will contain the array of CSV paths to translation files.
            csv_path = os.path.join(output_path, 'trans', section_name + '.csv')
            csv_paths = [csv_path]
            if not os.path.exists(csv_path):
                # Not found, try to look for a {section_name}/*.csv folder with files
                csv_path = os.path.join(output_path, 'trans', section_name)
                if not os.path.isdir(csv_path):
                    # No translation for this section at all; skip it.
                    continue

                root, _, files = next(os.walk(csv_path))
                csv_paths = sorted(os.path.join(root, x) for x in files)

            print("entering", section_name)
            # write_translated() consumes lines from origfile itself, so the
            # outer loop resumes after the translated span.
            write_translated(
                outfile,
                origfile,
                csv_paths,
            )
            print("finished section: ", section_name)
|
|
|
|
|
|
def swap_line_text(tokens, translation_lines: list[parser.OutputLine]) -> tuple[str, str, int]:
    """Rebuild a script line with its TEXT tokens replaced by translations.

    Non-text tokens (engine commands, markup) are copied verbatim into both
    output strings; each TEXT token consumes one entry of `translation_lines`
    in order.

    Args:
        tokens: token list for one script line, as produced by scripter.
        translation_lines: remaining translated lines for the section; only
            the first `lines_consumed` entries are used, and the list itself
            is never mutated.

    Returns:
        A tuple ``(line_en, line_jp, lines_consumed)`` — the rebuilt English
        line, the rebuilt Japanese line, and how many translation entries
        were consumed.
    """
    # Lists are pointers to arrays, don't mutate the argument.
    ret_en = ''
    ret_jp = ''
    lines_consumed = 0

    for token in tokens:
        if token.type == scripter.TokenType.TEXT:
            # English text is transliterated to plain ASCII and wrapped in
            # backticks — NOTE(review): the backticks appear to be the
            # engine's quoting for latin text; confirm against the format.
            ret_en += '`' + unidecode(translation_lines[lines_consumed].text_en) + '`'
            ret_jp += translation_lines[lines_consumed].text_jp
            lines_consumed += 1
        else:
            ret_en += token.token
            ret_jp += token.token

    return ret_en, ret_jp, lines_consumed
|
|
|
|
|
|
|
|
|
|
# Given a set of translation files, the original file and the output file
# replace the japanese lines with the translated ones in a given section.
def write_translated(outfile, origfile, translation_file_paths):
    """Translate one section of the script in place.

    For each CSV path, parse it into a list of translated line structures,
    then keep consuming lines from `origfile`, writing the translated form
    to `outfile`, until that CSV's structures are exhausted.

    NOTE: `origfile` is a shared file iterator — the caller's own loop over
    it resumes exactly where the `break` below leaves off.  Do not reorder
    the consumption logic.

    Exits the whole process (sys.exit(1)) if a rebuilt Japanese line does
    not round-trip to the original line, except for two whitelisted files.
    """
    for transfilepath in translation_file_paths:
        print(f'- reading "{transfilepath}"')
        # List of parser.OutputLine entries still waiting to be written.
        structure = parser.parse_to_structure(transfilepath)

        for line in origfile:
            # --- Debug ---
            global debug_current_line
            debug_current_line += 1
            # -------------

            tokens = scripter.parse_line(line)

            # Replace the text tokens with the translated ones
            line_en, line_jp, lines_written = swap_line_text(tokens, structure)
            # Remove the lines that have been written
            structure = structure[lines_written:]

            if lines_written > 0:
                # --- Debug ---
                print(
                    "\n-",
                    debug_current_line,
                    transfilepath,
                    ''.join(str(x) for x in tokens),
                )
                print(">", line_en)
                print(">", line_jp)
                print("<", line, end='')
                # -------------

                # TODO: Bad code
                # Sanity check: rebuilding the line from tokens plus the JP
                # translation must reproduce the original line exactly.  Two
                # files are whitelisted because they legitimately differ.
                if line_jp+'\n' != line and not transfilepath.endswith("Opening.csv") and not transfilepath.endswith("Sub_Tips_099.csv"):
                    print()
                    print(" ------------------------------------------------------")
                    print(" ! NO THAT'S WRONG! !")
                    print(" ------------------------------------------------------")
                    sys.exit(1)

            # Write the line to the new output
            outfile.write(line_en + '\n')

            # Used up all of the structures, this chapter has ended.
            # Got to the next one
            if len(structure) <= 0:
                print()
                print(f'- finished "{transfilepath}"')
                break
|