wataru 2022-12-11 05:05:15 +09:00
parent 5c9767a3e9
commit 3b46081df4
5 changed files with 22 additions and 13 deletions

View File

@@ -383,9 +383,9 @@ if __name__ == thisFilename or args.colab == True:
         return mod_post_pre_training(batch)

     @app_fastapi.post("/post_start_training")
-    async def post_start_training():
+    async def post_start_training(enable_finetuning: bool = Form(...),GModel: str = Form(...),DModel: str = Form(...)):
         print("POST START TRAINING..")
-        return mod_post_start_training()
+        return mod_post_start_training(enable_finetuning, GModel, DModel)

     @app_fastapi.post("/post_stop_training")
     async def post_stop_training():
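Note: with the Form(...) parameters, /post_start_training now expects form-encoded fields instead of an empty body. A minimal client-side sketch; the host, port, and checkpoint file names below are illustrative assumptions, not values from this commit:

import requests  # any HTTP client that can send form data works

res = requests.post(
    "http://localhost:18888/post_start_training",  # hypothetical host/port
    data={
        "enable_finetuning": "true",  # FastAPI coerces the form string to bool
        "GModel": "G_180000.pth",     # hypothetical checkpoint names under logs/
        "DModel": "D_180000.pth",
    },
)
print(res.json())  # {"result": ..., "detail": ...}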

View File

@@ -3,17 +3,20 @@ from trainer_mods.files import get_file_list
 from fastapi.responses import JSONResponse
 from fastapi.encoders import jsonable_encoder

-LOG_DIR = "MMVC_Trainer/info"
+LOG_DIR = "logs"
 train_proc = None

 SUCCESS = 0
 ERROR = -1

 ### Submodule for Pre train
-def sync_exec(cmd:str, log_path:str):
+def sync_exec(cmd:str, log_path:str, cwd=None):
     shortCmdStr = cmd[:20]
     try:
         with open(log_path, 'w') as log_file:
-            proc = subprocess.run(cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="MMVC_Trainer")
+            if cwd == None:
+                proc = subprocess.run(cmd, shell=True, text=True, stdout=log_file, stderr=log_file)
+            else:
+                proc = subprocess.run(cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd=cwd)
             print(f"{shortCmdStr} returncode:{proc.returncode}")
             if proc.returncode != 0:
                 print(f"{shortCmdStr} exception:")
@@ -39,7 +42,7 @@ def sync_exec_with_stdout(cmd:str, log_path:str):
 def create_dataset():
     cmd = "python3 create_dataset_jtalk.py -f train_config -s 24000 -m dataset/multi_speaker_correspondence.txt"
     log_file = os.path.join(LOG_DIR, "log_create_dataset_jtalk.txt")
-    res = sync_exec(cmd, log_file)
+    res = sync_exec(cmd, log_file, "MMVC_Trainer")
     return res

 def set_batch_size(batch:int):
@@ -55,7 +58,7 @@ def set_dummy_device_count():
     return res

 ### Submodule for Train
-def exec_training():
+def exec_training(enable_finetuning:bool, GModel:str, DModel:str):
     global train_proc
     log_file = os.path.join(LOG_DIR, "training.txt")
@@ -71,7 +74,12 @@ def exec_training():
     try:
         with open(log_file, 'w') as log_file:
-            cmd = 'python3 train_ms.py -c configs/train_config.json -m ./'
+            if enable_finetuning == True:
+                GModelPath = os.path.join("logs", GModel)  # cwd is set when the process is launched, so the "logs" folder is correct here.
+                DModelPath = os.path.join("logs", DModel)
+                cmd = f'python3 train_ms.py -c configs/train_config.json -m ./ -fg {GModelPath} -fd {DModelPath}'
+            else:
+                cmd = 'python3 train_ms.py -c configs/train_config.json -m ./'
             print("exec:",cmd)
             train_proc = subprocess.Popen("exec "+cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="MMVC_Trainer")
             print("Training stated")
@@ -115,8 +123,9 @@ def mod_post_pre_training(batch:int):
     return {"result":"success", "detail": f"Preprocess succeeded. {res[1]}"}

-def mod_post_start_training():
-    res = exec_training()
+def mod_post_start_training(enable_finetuning:str, GModel:str, DModel:str):
+    print("START_TRAINING:::::::", enable_finetuning, GModel, DModel)
+    res = exec_training(enable_finetuning, GModel, DModel)
     if res[0] == ERROR:
         return {"result":"failed", "detail": f"Start training failed. {res[1]}"}

View File

@@ -1,4 +1,4 @@
-FROM dannadori/voice-changer-internal:20221209_141145 as front
+FROM dannadori/voice-changer-internal:20221211_045539 as front
 FROM debian:bullseye-slim as base

 ARG DEBIAN_FRONTEND=noninteractive

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
 #!/bin/bash
 set -eu

-DOCKER_IMAGE=dannadori/voice-changer:20221209_141321
+DOCKER_IMAGE=dannadori/voice-changer:20221211_050342
 # DOCKER_IMAGE=voice-changer
 if [ $# = 0 ]; then