From 9c56579df1990a01c9e9d5cbfc0a1ee56942ca3d Mon Sep 17 00:00:00 2001 From: wataru Date: Sun, 18 Jun 2023 09:24:47 +0900 Subject: [PATCH] bugfix: sample download --- server/const.py | 4 +- server/downloader/SampleDownloader.py | 8 ++-- .../RVC/inferencer/InferencerManager.py | 41 ++++--------------- 3 files changed, 15 insertions(+), 38 deletions(-) diff --git a/server/const.py b/server/const.py index 35cd747b..1a3bacc9 100644 --- a/server/const.py +++ b/server/const.py @@ -106,8 +106,8 @@ def getSampleJsonAndModelIds(mode: RVCSampleMode): ], [ ("Tsukuyomi-chan_o", {"useIndex": False}), ("Amitaro_o", {"useIndex": False}), - ("KikotoMahiro_o", {"useIndex": False}), - ("TokinaShigure_o", {"useIndex": True}), + # ("KikotoMahiro_o", {"useIndex": False}), + # ("TokinaShigure_o", {"useIndex": True}), ] elif mode == "testOfficial": return [ diff --git a/server/downloader/SampleDownloader.py b/server/downloader/SampleDownloader.py index 55748897..ecb5a08e 100644 --- a/server/downloader/SampleDownloader.py +++ b/server/downloader/SampleDownloader.py @@ -76,7 +76,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str for i, initSampleId in enumerate(sampleModelIds): targetSampleId = initSampleId[0] targetSampleParams = initSampleId[1] - tagetSlotIndex = slotIndex[i] + targetSlotIndex = slotIndex[i] # 初期サンプルをサーチ match = False @@ -89,7 +89,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str continue # 検出されたら、、、 - slotDir = os.path.join(model_dir, str(tagetSlotIndex)) + slotDir = os.path.join(model_dir, str(targetSlotIndex)) if sample.voiceChangerType == "RVC": slotInfo: RVCModelSlot = RVCModelSlot() @@ -147,7 +147,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str slotInfo.defaultIndexRatio = 1 slotInfo.defaultProtect = 0.5 slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx") - modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo) + 
modelSlotManager.save_model_slot(targetSlotIndex, slotInfo) else: print(f"[Voice Changer] {sample.voiceChangerType} is not supported.") @@ -169,4 +169,4 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str _setInfoByONNX(slotInfo) else: _setInfoByPytorch(slotInfo) - modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo) + modelSlotManager.save_model_slot(targetSlotIndex, slotInfo) diff --git a/server/voice_changer/RVC/inferencer/InferencerManager.py b/server/voice_changer/RVC/inferencer/InferencerManager.py index bc42772c..cd5f0a6e 100644 --- a/server/voice_changer/RVC/inferencer/InferencerManager.py +++ b/server/voice_changer/RVC/inferencer/InferencerManager.py @@ -30,45 +30,22 @@ class InferencerManager: file: str, gpu: int, ) -> Inferencer: - if ( - inferencerType == EnumInferenceTypes.pyTorchRVC - or inferencerType == EnumInferenceTypes.pyTorchRVC.value - ): + print("inferencerTypeinferencerTypeinferencerTypeinferencerType", inferencerType) + if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value: return RVCInferencer().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.pyTorchRVCNono - or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value - ): + elif inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value: return RVCInferencerNono().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.pyTorchRVCv2 - or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value - ): + elif inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value: return RVCInferencerv2().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono - or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value - ): + elif inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value: 
return RVCInferencerv2Nono().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.pyTorchWebUI - or inferencerType == EnumInferenceTypes.pyTorchWebUI.value - ): + elif inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value: return WebUIInferencer().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.pyTorchWebUINono - or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value - ): + elif inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value: return WebUIInferencerNono().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.onnxRVC - or inferencerType == EnumInferenceTypes.onnxRVC.value - ): + elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value: return OnnxRVCInferencer().loadModel(file, gpu) - elif ( - inferencerType == EnumInferenceTypes.onnxRVCNono - or inferencerType == EnumInferenceTypes.onnxRVCNono.value - ): + elif inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value: return OnnxRVCInferencerNono().loadModel(file, gpu) else: raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)