Use the new csv files for nscripter generation

Dusk 2024-02-28 20:20:34 +01:00
parent ac163fb05a
commit 77db14386b
2 changed files with 24 additions and 12 deletions


@@ -14,6 +14,8 @@ import fix
 debug_current_line = -1
 def process_sections():
+    output_path = config.get('output_path')
     output_filepath = os.path.join(config.get('output_path'), 'out.txt')
     outfile = open(output_filepath, 'w', encoding='shift_jisx0213')
     origfile = open(fix.open_onikakushi(), 'r', encoding='shift_jisx0213')
@@ -29,12 +31,25 @@ def process_sections():
         outfile.write(line)
-        if section_name in flow.onik:
+        # Try to look for a {section_name}.csv file
+        # csv_paths will contain the array of CSV paths to translation files
+        csv_path = os.path.join(output_path, 'trans', section_name + '.csv')
+        csv_paths = [ csv_path ]
+        if not os.path.exists(csv_path):
+            # Not found, try to look for a {section_name}/*.csv folder with files
+            csv_path = os.path.join(output_path, 'trans', section_name)
+            if not os.path.isdir(csv_path):
+                continue
+            root, _, files = next(os.walk(csv_path))
+            csv_paths = [os.path.join(root, x) for x in files]
+            csv_paths.sort()
             print("entering", section_name)
             write_translated(
                 outfile,
                 origfile,
-                flow.onik[section_name],
+                csv_paths,
             )
             print("finished section: ", section_name)
         else:
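
For reference, the lookup added above can be read in isolation: try trans/{section_name}.csv under the output path first, and only if that single file is missing fall back to a trans/{section_name}/ directory, taking every file inside it in sorted order. A minimal sketch of that idea; the helper name find_section_csvs and the empty-list return are illustrative only (the commit itself just uses continue to skip the section):

    import os

    def find_section_csvs(output_path: str, section_name: str) -> list[str]:
        # Preferred layout: a single <output_path>/trans/<section_name>.csv file
        csv_path = os.path.join(output_path, 'trans', section_name + '.csv')
        if os.path.exists(csv_path):
            return [csv_path]
        # Fallback layout: <output_path>/trans/<section_name>/ holding one or more CSV files
        csv_dir = os.path.join(output_path, 'trans', section_name)
        if not os.path.isdir(csv_dir):
            return []  # no translation data for this section
        root, _, files = next(os.walk(csv_dir))
        return sorted(os.path.join(root, name) for name in files)
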
@@ -76,7 +91,6 @@ def swap_line_text(tokens, translation_lines: list[parser.OutputLine]) -> (str,
 def write_translated(outfile, origfile, translation_file_paths):
     for transfilepath in translation_file_paths:
         print(f'- reading "{transfilepath}"')
-        parser.parse_to_csv(transfilepath)
         structure = parser.parse_to_structure(transfilepath)
         for line in origfile:
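
With this change write_translated no longer cares where the paths came from: process_sections hands it a ready-made list of CSV paths and parse_to_structure is called on each path directly, so the intermediate parse_to_csv call is no longer needed here. A hypothetical call for the directory layout (the output directory, section and file names are made up for illustration):

    write_translated(
        outfile,
        origfile,
        ['out/trans/some_section/000.csv', 'out/trans/some_section/001.csv'],
    )
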


@@ -143,12 +143,10 @@ def parse_to_csv():
 def parse_to_structure(filename: str) -> list[OutputLine]:
-    out_path = config.get('output_path')
-    csvname = os.path.join(out_path, filename + ".csv")
     escapechar = config.get('csv_escapechar')
     delchar = config.get('csv_delchar')
-    with open(csvname, 'r') as csvfile:
+    with open(filename, 'r') as csvfile:
         csv_reader = csv.reader(
             csvfile,
             delimiter=delchar,
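
On the parser side, parse_to_structure now takes the full path to a CSV file instead of rebuilding it from output_path and the section name, which is what lets process_sections point it at either layout. Stripped down to the reading part it behaves roughly like the sketch below; the function name and the plain list-of-rows return are assumptions for illustration, the real function builds OutputLine objects:

    import csv

    def read_translation_rows(filename: str, delchar: str, escapechar: str) -> list[list[str]]:
        # filename is already a complete path, so nothing is joined onto output_path here
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=delchar, escapechar=escapechar)
            return [row for row in reader]
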