wataru 2022-11-08 19:00:47 +09:00
parent 0fc7d4c873
commit fbff691ffe
19 changed files with 526 additions and 69 deletions


@ -20,9 +20,23 @@ from datetime import datetime
import torch
import numpy as np
from mods.ssl import create_self_signed_cert
from mods.VoiceChanger import VoiceChanger
# from mods.Whisper import Whisper
# File Uploader
from mods.FileUploader import upload_file, concat_file_chunks
# Trainer Rest Internal
from mods.Trainer_Speakers import mod_get_speakers
from mods.Trainer_Speaker import mod_delete_speaker
from mods.Trainer_Speaker_Voices import mod_get_speaker_voices
from mods.Trainer_Speaker_Voice import mod_get_speaker_voice
from mods.Trainer_MultiSpeakerSetting import mod_get_multi_speaker_setting, mod_post_multi_speaker_setting
from mods.Trainer_Models import mod_get_models
from mods.Trainer_Model import mod_get_model, mod_delete_model
from mods.Trainer_Training import mod_post_pre_training, mod_post_start_training, mod_post_stop_training, mod_get_related_files, mod_get_tail_training_log
class UvicornSuppressFilter(logging.Filter):
def filter(self, record):
@ -131,6 +145,24 @@ args = parser.parse_args()
printMessage(f"Phase name:{__name__}", level=2) printMessage(f"Phase name:{__name__}", level=2)
thisFilename = os.path.basename(__file__)[:-3] thisFilename = os.path.basename(__file__)[:-3]
from typing import Callable, List
from fastapi import Body, FastAPI, HTTPException, Request, Response
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute
class ValidationErrorLoggingRoute(APIRoute):
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def custom_route_handler(request: Request) -> Response:
try:
return await original_route_handler(request)
except Exception as exc:
print("Exception", request.url, str(exc))
body = await request.body()
# exc.errors() only exists on RequestValidationError; fall back to str(exc) for other exception types.
detail = {"errors": exc.errors() if hasattr(exc, "errors") else [str(exc)], "body": body.decode()}
raise HTTPException(status_code=422, detail=detail)
return custom_route_handler
if __name__ == thisFilename or args.colab == True:
printMessage(f"PHASE3:{__name__}", level=2)
@ -139,6 +171,7 @@ if __name__ == thisFilename or args.colab == True:
MODEL = args.m
app_fastapi = FastAPI()
app_fastapi.router.route_class = ValidationErrorLoggingRoute
app_fastapi.add_middleware(
CORSMiddleware,
allow_origins=["*"],
@ -149,6 +182,10 @@ if __name__ == thisFilename or args.colab == True:
app_fastapi.mount("/front", StaticFiles(directory="../frontend/dist", html=True), name="static") app_fastapi.mount("/front", StaticFiles(directory="../frontend/dist", html=True), name="static")
app_fastapi.mount("/trainer", StaticFiles(directory="../frontend/dist", html=True), name="static")
app_fastapi.mount("/recorder", StaticFiles(directory="../frontend/dist", html=True), name="static")
sio = socketio.AsyncServer(
async_mode='asgi',
cors_allowed_origins='*'
@ -178,36 +215,20 @@ if __name__ == thisFilename or args.colab == True:
return {"result": "Index"} return {"result": "Index"}
UPLOAD_DIR = "model_upload_dir" ############
# File Uploder
# ##########
UPLOAD_DIR = "upload_dir"
os.makedirs(UPLOAD_DIR, exist_ok=True) os.makedirs(UPLOAD_DIR, exist_ok=True)
# Can colab receive post request "ONLY" at root path? MODEL_DIR = "/MMVC_Trainer/logs"
@app_fastapi.post("/upload_model_file") os.makedirs(MODEL_DIR, exist_ok=True)
async def upload_file(configFile:UploadFile = File(...), modelFile: UploadFile = File(...)):
if configFile and modelFile:
for file in [modelFile, configFile]:
filename = file.filename
fileobj = file.file
upload_dir = open(os.path.join(UPLOAD_DIR, filename),'wb+')
shutil.copyfileobj(fileobj, upload_dir)
upload_dir.close()
namespace.loadModel(os.path.join(UPLOAD_DIR, configFile.filename), os.path.join(UPLOAD_DIR, modelFile.filename))
return {"uploaded files": f"{configFile.filename}, {modelFile.filename} "}
return {"Error": "uploaded file is not found."}
@app_fastapi.post("/upload_file") @app_fastapi.post("/upload_file")
async def post_upload_file( async def post_upload_file(
file:UploadFile = File(...), file:UploadFile = File(...),
filename: str = Form(...) filename: str = Form(...)
): ):
return upload_file(UPLOAD_DIR, file, filename)
if file and filename:
fileobj = file.file
upload_dir = open(os.path.join(UPLOAD_DIR, filename),'wb+')
shutil.copyfileobj(fileobj, upload_dir)
upload_dir.close()
return {"uploaded files": f"{filename} "}
return {"Error": "uploaded file is not found."}
@app_fastapi.post("/load_model") @app_fastapi.post("/load_model")
async def post_load_model( async def post_load_model(
@ -216,33 +237,40 @@ if __name__ == thisFilename or args.colab == True:
configFilename: str = Form(...)
):
-target_file_name = modelFilename
-with open(os.path.join(UPLOAD_DIR, target_file_name), "ab") as target_file:
-for i in range(modelFilenameChunkNum):
-filename = f"{modelFilename}_{i}"
-chunk_file_path = os.path.join(UPLOAD_DIR,filename)
-stored_chunk_file = open(chunk_file_path, 'rb')
-target_file.write(stored_chunk_file.read())
-stored_chunk_file.close()
-os.unlink(chunk_file_path)
-target_file.close()
-print(f'File saved to: {target_file_name}')
-print(f'Load: {configFilename}, {target_file_name}')
-namespace.loadModel(os.path.join(UPLOAD_DIR, configFilename), os.path.join(UPLOAD_DIR, target_file_name))
-return {"File saved to": f"{target_file_name}"}
+modelFilePath = concat_file_chunks(UPLOAD_DIR, modelFilename, modelFilenameChunkNum, UPLOAD_DIR)
+print(f'File saved to: {modelFilePath}')
+configFilePath = os.path.join(UPLOAD_DIR, configFilename)
+namespace.loadModel(configFilePath, modelFilePath)
+return {"load": f"{modelFilePath}, {configFilePath}"}
@app_fastapi.post("/load_model_for_train")
async def post_load_model_for_train(
modelGFilename: str = Form(...),
modelGFilenameChunkNum: int = Form(...),
modelDFilename: str = Form(...),
modelDFilenameChunkNum: int = Form(...),
):
modelGFilePath = concat_file_chunks(UPLOAD_DIR, modelGFilename, modelGFilenameChunkNum, MODEL_DIR)
modelDFilePath = concat_file_chunks(UPLOAD_DIR, modelDFilename, modelDFilenameChunkNum,MODEL_DIR)
return {"File saved": f"{modelGFilePath}, {modelDFilePath}"}
@app_fastapi.get("/transcribe")
def get_transcribe():
try:
namespace.transcribe()
except Exception as e:
print("TRANSCRIBE PROCESSING!!!! EXCEPTION!!!", e)
print(traceback.format_exc())
return str(e)
@app_fastapi.post("/extract_voices")
async def post_extract_voices(
zipFilename: str = Form(...),
zipFileChunkNum: int = Form(...),
):
zipFilePath = concat_file_chunks(UPLOAD_DIR, zipFilename, zipFileChunkNum, UPLOAD_DIR)
shutil.unpack_archive(zipFilePath, "/MMVC_Trainer/dataset/textful/")
return {"Zip file unpacked": f"{zipFilePath}"}
############
# Voice Changer
# ##########
@app_fastapi.post("/test") @app_fastapi.post("/test")
async def post_test(voice:VoiceModel): async def post_test(voice:VoiceModel):
try: try:
@ -284,6 +312,68 @@ if __name__ == thisFilename or args.colab == True:
return str(e)
# Trainer REST API: Colab seems to accept POST requests only at paths directly under the root, so these are "REST-style" endpoints (see the client sketch at the end of this file's diff)
@app_fastapi.get("/get_speakers")
async def get_speakers():
return mod_get_speakers()
@app_fastapi.delete("/delete_speaker")
async def delete_speaker(speaker:str= Form(...)):
return mod_delete_speaker(speaker)
@app_fastapi.get("/get_speaker_voices")
async def get_speaker_voices(speaker:str):
return mod_get_speaker_voices(speaker)
@app_fastapi.get("/get_speaker_voice")
async def get_speaker_voice(speaker:str, voice:str):
return mod_get_speaker_voice(speaker, voice)
@app_fastapi.get("/get_multi_speaker_setting")
async def get_multi_speaker_setting():
return mod_get_multi_speaker_setting()
@app_fastapi.post("/post_multi_speaker_setting")
async def post_multi_speaker_setting(setting: str = Form(...)):
return mod_post_multi_speaker_setting(setting)
@app_fastapi.get("/get_models")
async def get_models():
return mod_get_models()
@app_fastapi.get("/get_model")
async def get_model(model:str):
return mod_get_model(model)
@app_fastapi.delete("/delete_model")
async def delete_model(model:str= Form(...)):
return mod_delete_model(model)
@app_fastapi.post("/post_pre_training")
async def post_pre_training(batch:int= Form(...)):
return mod_post_pre_training(batch)
@app_fastapi.post("/post_start_training")
async def post_start_training():
print("POST START TRAINING..")
return mod_post_start_training()
@app_fastapi.post("/post_stop_training")
async def post_stop_training():
print("POST STOP TRAINING..")
return mod_post_stop_training()
@app_fastapi.get("/get_related_files")
async def get_related_files():
return mod_get_related_files()
@app_fastapi.get("/get_tail_training_log")
async def get_tail_training_log(num:int):
return mod_get_tail_training_log(num)
if __name__ == '__mp_main__':
printMessage(f"PHASE2:{__name__}", level=2)

demo/mods/FileUploader.py Executable file

@ -0,0 +1,27 @@
import os, shutil
from fastapi import UploadFile
# UPLOAD_DIR = "model_upload_dir"
def upload_file(upload_dirname:str, file:UploadFile, filename: str):
if file and filename:
fileobj = file.file
upload_dir = open(os.path.join(upload_dirname, filename),'wb+')
shutil.copyfileobj(fileobj, upload_dir)
upload_dir.close()
return {"uploaded files": f"{filename} "}
return {"Error": "uploaded file is not found."}
def concat_file_chunks(upload_dirname:str, filename:str, chunkNum:int, dest_dirname:str):
target_file_name = os.path.join(dest_dirname, filename)
with open(target_file_name, "ab") as target_file:
for i in range(chunkNum):
chunkName = f"{filename}_{i}"
chunk_file_path = os.path.join(upload_dirname, chunkName)
stored_chunk_file = open(chunk_file_path, 'rb')
target_file.write(stored_chunk_file.read())
stored_chunk_file.close()
os.unlink(chunk_file_path)
target_file.close()
return target_file_name
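A hypothetical client-side sketch of the chunked-upload flow these helpers expect: each piece is POSTed to /upload_file under the naming convention "<filename>_<i>", then /load_model asks the server to reassemble them with concat_file_chunks. The host/port, chunk size, and file names below are assumptions:

import os
import requests

BASE = "http://localhost:18888"   # assumed host/port
CHUNK_SIZE = 1024 * 1024          # arbitrary example chunk size

def upload_in_chunks(local_path: str, remote_name: str) -> int:
    # Send the file as pieces named "<remote_name>_<i>", the convention concat_file_chunks() expects.
    i = 0
    with open(local_path, "rb") as f:
        while True:
            chunk = f.read(CHUNK_SIZE)
            if not chunk:
                break
            requests.post(f"{BASE}/upload_file",
                          files={"file": chunk},
                          data={"filename": f"{remote_name}_{i}"})
            i += 1
    return i

# "G_latest.pth" and "config.json" are placeholder file names.
chunks = upload_in_chunks("G_latest.pth", "G_latest.pth")
requests.post(f"{BASE}/upload_file",
              files={"file": open("config.json", "rb")},
              data={"filename": "config.json"})
requests.post(f"{BASE}/load_model",
              data={"modelFilename": "G_latest.pth",
                    "modelFilenameChunkNum": chunks,
                    "configFilename": "config.json"})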

demo/mods/Trainer_Model.py Executable file

@ -0,0 +1,13 @@
from fastapi.responses import FileResponse
import os
def mod_get_model(modelFile:str):
modelPath = os.path.join("/MMVC_Trainer/logs", modelFile)
return FileResponse(path=modelPath)
def mod_delete_model(modelFile:str):
modelPath = os.path.join("/MMVC_Trainer/logs", modelFile)
os.unlink(modelPath)
return {"Model deleted": f"{modelFile}"}

demo/mods/Trainer_Models.py Executable file

@ -0,0 +1,21 @@
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from trainer_mods.files import get_file_list
import os
def mod_get_models():
gModels = get_file_list(f'/MMVC_Trainer/logs/G*.pth')
dModels = get_file_list(f'/MMVC_Trainer/logs/D*.pth')
models = []
models.extend(gModels)
models.extend(dModels)
models = [ os.path.basename(x) for x in models]
models = sorted(models)
data = {
"models":models
}
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)


@ -0,0 +1,26 @@
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
import os
MULTI_SPEAKER_SETTING_PATH = "/MMVC_Trainer/dataset/multi_speaker_correspondence.txt"
def mod_get_multi_speaker_setting():
data = {}
if os.path.isfile(MULTI_SPEAKER_SETTING_PATH) == False:
with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
f.write("")
f.flush()
f.close()
with open(MULTI_SPEAKER_SETTING_PATH, "r") as f:
setting = f.read()
data["multi_speaker_setting"] = setting
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)
def mod_post_multi_speaker_setting(setting:str):
with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
f.write(setting)
f.flush()
f.close()
return {"Write Multispeaker setting": f"{setting}"}

demo/mods/Trainer_Speaker.py Executable file

@ -0,0 +1,15 @@
import shutil
from mods.Trainer_MultiSpeakerSetting import MULTI_SPEAKER_SETTING_PATH
def mod_delete_speaker(speaker:str):
shutil.rmtree(f"/MMVC_Trainer/dataset/textful/{speaker}")
with open(MULTI_SPEAKER_SETTING_PATH, "r") as f:
setting = f.readlines()
filtered = filter(lambda x: x.startswith(f"{speaker}|")==False, setting)
with open(MULTI_SPEAKER_SETTING_PATH, "w") as f:
f.writelines(list(filtered))
f.flush()
f.close()
return {"Speaker deleted": f"{speaker}"}


@ -0,0 +1,28 @@
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
import os, base64
def mod_get_speaker_voice(speaker:str, voice:str):
wav_file = f'/MMVC_Trainer/dataset/textful/{speaker}/wav/{voice}.wav'
text_file = f'/MMVC_Trainer/dataset/textful/{speaker}/text/{voice}.txt'
readable_text_file = f'/MMVC_Trainer/dataset/textful/{speaker}/readable_text/{voice}.txt'
data = {}
if os.path.exists(wav_file):
with open(wav_file, "rb") as f:
wav_data = f.read()
wav_data_base64 = base64.b64encode(wav_data).decode('utf-8')
data["wav"] = wav_data_base64
if os.path.exists(text_file):
with open(text_file, "r") as f:
text_data = f.read()
data["text"] = text_data
if os.path.exists(readable_text_file):
with open(readable_text_file, "r") as f:
text_data = f.read()
data["readable_text"] = text_data
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)
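A hypothetical client-side decode of the payload returned by /get_speaker_voice (the wav field is base64, as encoded above); the host/port and the speaker/voice names are assumptions:

import base64
import requests

BASE = "http://localhost:18888"  # assumed host/port
# "speakerA" and "001" are placeholder names for illustration only
resp = requests.get(f"{BASE}/get_speaker_voice", params={"speaker": "speakerA", "voice": "001"}).json()
if "wav" in resp:
    with open("001.wav", "wb") as out:
        out.write(base64.b64decode(resp["wav"]))  # undo the base64 encoding applied on the server
print(resp.get("text"), resp.get("readable_text"))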


@ -0,0 +1,22 @@
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from trainer_mods.files import get_file_list
import os
def mod_get_speaker_voices(speaker:str):
voices = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/wav/*.wav')
texts = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/text/*.txt')
readable_texts = get_file_list(f'/MMVC_Trainer/dataset/textful/{speaker}/readable_text/*.txt')
items = voices
items.extend(texts)
items.extend(readable_texts)
items = [ os.path.splitext(os.path.basename(x))[0] for x in items]
items = sorted(set(items))
data = {
"voices":items
}
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)

demo/mods/Trainer_Speakers.py Executable file

@ -0,0 +1,15 @@
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from trainer_mods.files import get_dir_list
import os
# Creation is handled by the FileUploader module.
def mod_get_speakers():
os.makedirs("/MMVC_Trainer/dataset/textful", exist_ok=True)
speakers = get_dir_list("/MMVC_Trainer/dataset/textful/")
data = {
"speakers":sorted(speakers)
}
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)

demo/mods/Trainer_Training.py Executable file

@ -0,0 +1,167 @@
import subprocess,os
from trainer_mods.files import get_file_list
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
LOG_DIR = "/MMVC_Trainer/info"
train_proc = None
SUCCESS = 0
ERROR = -1
### Submodules for pre-training
def sync_exec(cmd:str, log_path:str):
shortCmdStr = cmd[:20]
try:
with open(log_path, 'w') as log_file:
proc = subprocess.run(cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="/MMVC_Trainer")
print(f"{shortCmdStr} returncode:{proc.returncode}")
if proc.returncode != 0:
print(f"{shortCmdStr} exception:")
return (ERROR, f"returncode:{proc.returncode}")
except Exception as e:
print(f"{shortCmdStr} exception:", str(e))
return (ERROR, str(e))
return (SUCCESS, "success")
def sync_exec_with_stdout(cmd:str, log_path:str):
shortCmdStr = cmd[:20]
try:
with open(log_path, 'w') as log_file:
proc = subprocess.run(cmd, shell=True, text=True, stdout=subprocess.PIPE,
stderr=log_file, cwd="/MMVC_Trainer")
print(f"STDOUT{shortCmdStr}",proc.stdout)
except Exception as e:
print(f"{shortCmdStr} exception:", str(e))
return (ERROR, str(e))
return (SUCCESS, proc.stdout)
def create_dataset():
cmd = "python3 create_dataset_jtalk.py -f train_config -s 24000 -m dataset/multi_speaker_correspondence.txt"
log_file = os.path.join(LOG_DIR, "log_create_dataset_jtalk.txt")
res = sync_exec(cmd, log_file)
return res
def set_batch_size(batch:int):
cmd = "sed -i 's/\"batch_size\": [0-9]*/\"batch_size\": " + str(batch) + "/' /MMVC_Trainer/configs/baseconfig.json"
log_file = os.path.join(LOG_DIR, "log_set_batch_size.txt")
res = sync_exec(cmd, log_file)
return res
def set_dummy_device_count():
cmd = 'sed -ie "s/torch.cuda.device_count()/1/" /MMVC_Trainer/train_ms.py'
log_file = os.path.join(LOG_DIR, "log_set_dummy_device_count.txt")
res = sync_exec(cmd, log_file)
return res
### Submodule for Train
def exec_training():
global train_proc
log_file = os.path.join(LOG_DIR, "training.txt")
# Check whether training is already running (avoid launching it twice)
if train_proc != None:
status = train_proc.poll()
if status != None:
print("Training has ended.", status)
train_proc = None
else:
print("Training has already started.")
return (ERROR, "Training has already started")
try:
with open(log_file, 'w') as log_file:
cmd = 'python3 train_ms.py -c configs/train_config.json -m ./'
print("exec:",cmd)
train_proc = subprocess.Popen("exec "+cmd, shell=True, text=True, stdout=log_file, stderr=log_file, cwd="/MMVC_Trainer")
print("Training stated")
print(f"returncode:{train_proc.returncode}")
except Exception as e:
print("start training exception:", str(e))
return (ERROR, str(e))
return (SUCCESS, "success")
def stop_training():
global train_proc
if train_proc == None:
print("Training have not stated.")
return (ERROR, "Training have not stated.")
status = train_proc.poll()
if status != None:
print("Training have already ended.", status)
train_proc = None
return (ERROR, "Training have already ended. " + status)
else:
train_proc.kill()
print("Training have stoped.")
return (SUCCESS, "success")
### Main
def mod_post_pre_training(batch:int):
res = set_batch_size(batch)
if res[0] == ERROR:
return {"result":"failed", "detail": f"Preprocess(set_batch_size) failed. {res[1]}"}
res = set_dummy_device_count()
if res[0] == ERROR:
return {"result":"failed", "detail": f"Preprocess(set_dummy_device_count) failed. {res[1]}"}
res = create_dataset()
if res[0] == ERROR:
return {"result":"failed", "detail": f"Preprocess failed(create_dataset). {res[1]}"}
return {"result":"success", "detail": f"Preprocess succeeded. {res[1]}"}
def mod_post_start_training():
res = exec_training()
if res[0] == ERROR:
return {"result":"failed", "detail": f"Start training failed. {res[1]}"}
return {"result":"success", "detail": f"Start training succeeded. {res[1]}"}
def mod_post_stop_training():
res = stop_training()
if res[0] == ERROR:
return {"result":"failed", "detail": f"Stop training failed. {res[1]}"}
return {"result":"success", "detail": f"Stop training succeeded. {res[1]}"}
### DEBUG
def mod_get_related_files():
files = get_file_list(os.path.join(LOG_DIR,"*"))
files.extend([
"/MMVC_Trainer/dataset/multi_speaker_correspondence.txt",
"/MMVC_Trainer/train_ms.py",
])
files.extend(
get_file_list("/MMVC_Trainer/configs/*")
)
res = []
for f in files:
size = os.path.getsize(f)
data = ""
if size < 1024*1024:
with open(f, "r") as input:
data = input.read()
res.append({
"name":f,
"size":size,
"data":data
})
json_compatible_item_data = jsonable_encoder(res)
return JSONResponse(content=json_compatible_item_data)
def mod_get_tail_training_log(num:int):
training_log_file = os.path.join(LOG_DIR, "training.txt")
res = sync_exec(f"cat {training_log_file} | sed -e 's/.*\r//' > /tmp/out","/dev/null")
cmd = f'tail -n {num} /tmp/out'
res = sync_exec_with_stdout(cmd, "/dev/null")
if res[0] == ERROR:
return {"result":"failed", "detail": f"Tail training log failed. {res[1]}"}
return {"result":"success", "detail":res[1]}

demo/trainer_mods/files.py Executable file

@ -0,0 +1,19 @@
import os,glob
# NOTE: this walk-based version is shadowed by the glob-based get_file_list defined below.
def get_file_list(top_dir):
for root, dirs, files in os.walk(top_dir):
for dir in dirs:
dirPath = os.path.join(root, dir)
print(f'dirPath = {dirPath}')
for file in files:
filePath = os.path.join(root, file)
print(f'filePath = {filePath}')
def get_dir_list(top_dir):
return os.listdir(top_dir)
def get_file_list(top_dir):
return glob.glob(top_dir)
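The glob-based get_file_list defined last is the one the other modules end up using; a short usage sketch based on the calls made elsewhere in this commit:

from trainer_mods.files import get_file_list, get_dir_list

# glob pattern -> list of matching paths (e.g. generator checkpoints)
print(get_file_list("/MMVC_Trainer/logs/G*.pth"))
# directory path -> names of the entries directly under it (speaker folders)
print(get_dir_list("/MMVC_Trainer/dataset/textful/"))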

frontend/dist/assets/setting_recorder.json vendored Executable file

@ -0,0 +1,4 @@
{
"app_title": "recorder",
"majar_mode": "docker"
}

frontend/dist/assets/setting_trainer.json vendored Executable file

@ -0,0 +1,4 @@
{
"app_title": "trainer",
"majar_mode": "docker"
}


@ -1 +1 @@
-<!doctype html><html lang="ja" style="width:100%;height:100%;overflow:hidden"><head><meta charset="utf-8"/><title>voice recorder</title><script defer="defer" src="index.js"></script></head><body style="width:100%;height:100%;margin:0"><div id="app" style="width:100%;height:100%"></div><noscript><strong>javascriptを有効にしてください</strong></noscript></body></html>
+<!doctype html><html lang="ja" style="width:100%;height:100%;overflow:hidden"><head><meta charset="utf-8"/><title>Realtime Voice Changer (Train/VC)</title><script defer="defer" src="index.js"></script></head><body style="width:100%;height:100%;margin:0"><div id="app" style="width:100%;height:100%"></div><noscript><strong>javascriptを有効にしてください</strong></noscript></body></html>

File diff suppressed because one or more lines are too long


@ -1,20 +1,21 @@
#!/bin/bash
set -eu
-DOCKER_IMAGE=dannadori/voice-changer:20221104_062009
-#DOCKER_IMAGE=voice-changer
+#DOCKER_IMAGE=dannadori/voice-changer:20221108_105937
+DOCKER_IMAGE=voice-changer
MODE=$1
PARAMS=${@:2:($#-1)}
### DEFAULT VAR ###
DEFAULT_EX_PORT=18888
+DEFAULT_EX_TB_PORT=16006
DEFAULT_USE_GPU=on # on|off
# DEFAULT_VERBOSE=off # on|off
### ENV VAR ###
EX_PORT=${EX_PORT:-${DEFAULT_EX_PORT}}
+EX_TB_PORT=${EX_TB_PORT:-${DEFAULT_EX_TB_PORT}}
USE_GPU=${USE_GPU:-${DEFAULT_USE_GPU}}
# VERBOSE=${VERBOSE:-${DEFAULT_VERBOSE}}
@ -30,16 +31,16 @@ if [ "${MODE}" = "MMVC_TRAIN" ]; then
echo "トレーニングを開始します" echo "トレーニングを開始します"
docker run -it --gpus all --shm-size=128M \ docker run -it --gpus all --shm-size=128M \
-v `pwd`/exp/${name}/dataset:/MMVC_Trainer/dataset \ -v `pwd`/work_dir/logs:/MMVC_Trainer/logs \
-v `pwd`/exp/${name}/logs:/MMVC_Trainer/logs \ -v `pwd`/work_dir/dataset:/MMVC_Trainer/dataset \
-v `pwd`/exp/${name}/filelists:/MMVC_Trainer/filelists \ -v `pwd`/work_dir/info:/MMVC_Trainer/info \
-v `pwd`/vc_resources:/resources \
-e LOCAL_UID=$(id -u $USER) \ -e LOCAL_UID=$(id -u $USER) \
-e LOCAL_GID=$(id -g $USER) \ -e LOCAL_GID=$(id -g $USER) \
-e EX_PORT=${EX_PORT} -e EX_TB_PORT=${EX_TB_PORT} \
-e EX_IP="`hostname -I`" \ -e EX_IP="`hostname -I`" \
-e EX_PORT=${EX_PORT} \ -p ${EX_PORT}:8080 -p ${EX_TB_PORT}:6006 \
-e VERBOSE=${VERBOSE} \ $DOCKER_IMAGE "$@"
-p ${EX_PORT}:6006 $DOCKER_IMAGE "$@"
elif [ "${MODE}" = "MMVC" ]; then elif [ "${MODE}" = "MMVC" ]; then
if [ "${USE_GPU}" = "on" ]; then if [ "${USE_GPU}" = "on" ]; then


@ -1,4 +1,4 @@
-FROM dannadori/voice-changer-internal:20221104_061809 as front
+FROM dannadori/voice-changer-internal:20221108_184110 as front
FROM debian:bullseye-slim as base
ARG DEBIAN_FRONTEND=noninteractive
@ -60,7 +60,6 @@ COPY --from=base --chmod=777 /usr/local/lib/python3.9/dist-packages /usr/local/l
COPY --from=front --chmod=777 /MMVC_Trainer /MMVC_Trainer
RUN chmod 0777 /MMVC_Trainer
-WORKDIR /MMVC_Trainer
ADD /setup.sh /MMVC_Trainer/
ADD /exec.sh /MMVC_Trainer/
@ -69,6 +68,10 @@ COPY --from=front --chmod=777 /voice-changer-internal/frontend/dist /voice-chang
COPY --from=front --chmod=777 /voice-changer-internal/voice-change-service /voice-changer-internal/voice-change-service
RUN chmod 0777 /voice-changer-internal/voice-change-service
+## For historical reasons setup.sh is placed in MMVC_Trainer, so that directory is used as the WORKDIR at startup.
+WORKDIR /MMVC_Trainer
# ##### Soft VC
# COPY --from=front /hubert /hubert
# COPY --from=front /acoustic-model /acoustic-model


@ -39,16 +39,17 @@ if [ "${MODE}" = "MMVC" ] ; then
# python3 MMVCServerSIO.py $PARAMS 2>stderr.txt
# fi
echo "Starting MMVC"
-python3 MMVCServerSIO.py $PARAMS 2>stderr.txt
+python3 MMVCServerSIO.py $PARAMS #2>stderr.txt
elif [ "${MODE}" = "MMVC_TRAIN" ] ; then
-python3 create_dataset_jtalk.py -f train_config -s 24000 -m dataset/multi_speaker_correspondence.txt
-# date_tag=`date +%Y%m%d%H%M%S`
-sed -ie 's/80000/8000/' train_ms.py
-sed -ie "s/\"batch_size\": 10/\"batch_size\": $batch_size/" configs/train_config.json
-sed -ie "s/torch.cuda.device_count()/1/" train_ms.py
-python3 -m tensorboard.main --logdir logs --port 6006 --host 0.0.0.0 &
-python3 train_ms.py $PARAMS
+cd /voice-changer-internal/voice-change-service
+# python3 create_dataset_jtalk.py -f train_config -s 24000 -m dataset/multi_speaker_correspondence.txt
+# # date_tag=`date +%Y%m%d%H%M%S`
+# sed -ie 's/80000/8000/' train_ms.py
+# sed -ie "s/\"batch_size\": 10/\"batch_size\": $batch_size/" configs/train_config.json
+# sed -ie "s/torch.cuda.device_count()/1/" train_ms.py
+python3 -m tensorboard.main --logdir /MMVC_Trainer/logs --port 6006 --host 0.0.0.0 &
+python3 MMVCServerSIO.py $PARAMS
# if ${resume_flag}; then
# echo "Resuming training. Batch size: ${batch_size}."
# python3 train_ms.py -c configs/train_config.json -m vc


@ -3,11 +3,12 @@
# Reference: https://programwiz.org/2022/03/22/how-to-write-shell-script-for-option-parsing/
set -eu
echo "1"
# Create the execution user
USER_ID=${LOCAL_UID:-9001}
GROUP_ID=${LOCAL_GID:-9001}
echo "2"
echo "exec with [UID : $USER_ID, GID: $GROUP_ID]"
useradd -u $USER_ID -o -m user
groupmod -g $GROUP_ID user