Use the new csv files for nscripter generation
parent ac163fb05a
commit 77db14386b

src/orig.py | 32
--- a/src/orig.py
+++ b/src/orig.py
@@ -14,6 +14,8 @@ import fix
 debug_current_line = -1
 
 def process_sections():
+    output_path = config.get('output_path')
+
     output_filepath = os.path.join(config.get('output_path'), 'out.txt')
     outfile = open(output_filepath, 'w', encoding='shift_jisx0213')
     origfile = open(fix.open_onikakushi(), 'r', encoding='shift_jisx0213')
@@ -29,14 +31,27 @@ def process_sections():
             outfile.write(line)
 
-            if section_name in flow.onik:
-                print("entering", section_name)
-                write_translated(
-                    outfile,
-                    origfile,
-                    flow.onik[section_name],
-                )
-                print("finished section: ", section_name)
+            # Try to look for a {section_name}.csv file
+            # csv_paths will contain the array of CSV paths to translation files
+            csv_path = os.path.join(output_path, 'trans', section_name + '.csv')
+            csv_paths = [ csv_path ]
+            if not os.path.exists(csv_path):
+                # Not found, try to look for a {section_name}/*.csv folder with files
+                csv_path = os.path.join(output_path, 'trans', section_name)
+                if not os.path.isdir(csv_path):
+                    continue
+
+                root, _, files = next(os.walk(csv_path))
+                csv_paths = [os.path.join(root, x) for x in files]
+                csv_paths.sort()
+
+            print("entering", section_name)
+            write_translated(
+                outfile,
+                origfile,
+                csv_paths,
+            )
+            print("finished section: ", section_name)
         else:
             outfile.write(line)
 
 
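For reference, the hunk above resolves each section's translations either to a single
<output_path>/trans/<section_name>.csv file or, failing that, to every file inside a
<output_path>/trans/<section_name>/ folder, processed in sorted order. Below is a minimal
standalone sketch of that lookup, assuming it mirrors the diff; the helper name
resolve_csv_paths is invented for illustration and is not part of the repository:

import os

def resolve_csv_paths(output_path: str, section_name: str) -> list:
    # First preference: a single <output_path>/trans/<section_name>.csv file.
    csv_path = os.path.join(output_path, 'trans', section_name + '.csv')
    if os.path.exists(csv_path):
        return [csv_path]

    # Fallback: a <output_path>/trans/<section_name>/ folder of per-section CSV files.
    csv_dir = os.path.join(output_path, 'trans', section_name)
    if not os.path.isdir(csv_dir):
        return []  # process_sections() skips the section ("continue") in this case

    # Take the files directly inside the folder and return them in sorted order,
    # matching the csv_paths.sort() call in the diff.
    root, _, files = next(os.walk(csv_dir))
    return sorted(os.path.join(root, name) for name in files)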
@@ -76,7 +91,6 @@ def swap_line_text(tokens, translation_lines: list[parser.OutputLine]) -> (str,
 def write_translated(outfile, origfile, translation_file_paths):
     for transfilepath in translation_file_paths:
         print(f'- reading "{transfilepath}"')
-        parser.parse_to_csv(transfilepath)
         structure = parser.parse_to_structure(transfilepath)
 
         for line in origfile:
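With this change write_translated() receives ready-made CSV paths and no longer calls
parser.parse_to_csv() itself; parser.parse_to_structure() reads each file as-is. A hedged
sketch of the new call shape, with an invented path used purely for illustration:

# csv_paths would come from the lookup in process_sections(); the literal
# path below is made up for the example, not taken from the repository.
csv_paths = ['out/trans/onikakushi_01.csv']
write_translated(outfile, origfile, csv_paths)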
@@ -143,12 +143,10 @@ def parse_to_csv():
 
 
 def parse_to_structure(filename: str) -> list[OutputLine]:
-    out_path = config.get('output_path')
-    csvname = os.path.join(out_path, filename + ".csv")
     escapechar = config.get('csv_escapechar')
     delchar = config.get('csv_delchar')
 
-    with open(csvname, 'r') as csvfile:
+    with open(filename, 'r') as csvfile:
         csv_reader = csv.reader(
             csvfile,
             delimiter=delchar,
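parse_to_structure() now opens whatever path it is given instead of rebuilding the name from
output_path, while still reading with the configured delimiter and escape character. A minimal
sketch of reading such a file with the same csv.reader settings; the delchar and escapechar
values below are assumptions, since the real ones come from config and are not shown in this
diff:

import csv

def read_translation_rows(filename: str) -> list:
    # Assumed formatting values; the project takes these from
    # config.get('csv_delchar') and config.get('csv_escapechar').
    delchar = ','
    escapechar = '\\'
    with open(filename, 'r') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=delchar, escapechar=escapechar)
        # parse_to_structure() turns rows like these into OutputLine objects;
        # here we just return the raw rows.
        return [row for row in csv_reader]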