bugfix: sample download

wataru 2023-06-18 09:24:47 +09:00
parent 56ee649196
commit 9c56579df1
3 changed files with 15 additions and 38 deletions

View File

@@ -106,8 +106,8 @@ def getSampleJsonAndModelIds(mode: RVCSampleMode):
         ], [
             ("Tsukuyomi-chan_o", {"useIndex": False}),
             ("Amitaro_o", {"useIndex": False}),
-            ("KikotoMahiro_o", {"useIndex": False}),
-            ("TokinaShigure_o", {"useIndex": True}),
+            # ("KikotoMahiro_o", {"useIndex": False}),
+            # ("TokinaShigure_o", {"useIndex": True}),
         ]
     elif mode == "testOfficial":
         return [
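For context, this hunk edits the second of the two lists returned by getSampleJsonAndModelIds: judging from the function name and the surrounding code, the first list names the sample JSON sources and the second lists (sampleId, params) pairs to preinstall. A minimal sketch of that return shape, with a placeholder URL that is not taken from the repository:

```python
def getSampleJsonAndModelIds_sketch(mode: str):
    # Hypothetical illustration of the (json list, model-id list) return shape;
    # the URL below is a placeholder, not the project's real sample source.
    if mode == "production":
        return [
            "https://example.com/samples.json",
        ], [
            ("Tsukuyomi-chan_o", {"useIndex": False}),
            ("Amitaro_o", {"useIndex": False}),
        ]
    return [], []


jsons, modelIds = getSampleJsonAndModelIds_sketch("production")
print(len(jsons), len(modelIds))  # 1 2
```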

View File

@@ -76,7 +76,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
     for i, initSampleId in enumerate(sampleModelIds):
         targetSampleId = initSampleId[0]
         targetSampleParams = initSampleId[1]
-        tagetSlotIndex = slotIndex[i]
+        targetSlotIndex = slotIndex[i]
         # Search for the initial sample
         match = False
@@ -89,7 +89,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
                 continue
         # If it is found...
-        slotDir = os.path.join(model_dir, str(tagetSlotIndex))
+        slotDir = os.path.join(model_dir, str(targetSlotIndex))
         if sample.voiceChangerType == "RVC":
             slotInfo: RVCModelSlot = RVCModelSlot()
@@ -147,7 +147,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             slotInfo.defaultIndexRatio = 1
             slotInfo.defaultProtect = 0.5
             slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
-            modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
         else:
             print(f"[Voice Changer] {sample.voiceChangerType} is not supported.")
@@ -169,4 +169,4 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
                 _setInfoByONNX(slotInfo)
             else:
                 _setInfoByPytorch(slotInfo)
-            modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
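The hunks in this file all fix the same misspelled variable: tagetSlotIndex becomes targetSlotIndex, the slot that receives the downloaded sample and is later passed to modelSlotManager.save_model_slot. A minimal sketch of the pairing the loop performs; names outside the diff are illustrative, not taken from the repository:

```python
from typing import Any, Tuple


def assign_slots(sampleModelIds: list[Tuple[str, dict[str, Any]]], slotIndex: list[int]) -> dict[int, str]:
    """Pair each requested sample with the slot index it should be saved into."""
    assignments: dict[int, str] = {}
    for i, initSampleId in enumerate(sampleModelIds):
        targetSampleId = initSampleId[0]   # sample name, e.g. "Amitaro_o"
        targetSlotIndex = slotIndex[i]     # slot that receives this sample
        assignments[targetSlotIndex] = targetSampleId
    return assignments


print(assign_slots([("Tsukuyomi-chan_o", {"useIndex": False}), ("Amitaro_o", {"useIndex": False})], [0, 1]))
# -> {0: 'Tsukuyomi-chan_o', 1: 'Amitaro_o'}
```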

View File

@@ -30,45 +30,22 @@ class InferencerManager:
         file: str,
         gpu: int,
     ) -> Inferencer:
-        if (
-            inferencerType == EnumInferenceTypes.pyTorchRVC
-            or inferencerType == EnumInferenceTypes.pyTorchRVC.value
-        ):
+        print("inferencerTypeinferencerTypeinferencerTypeinferencerType", inferencerType)
+        if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value:
             return RVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCNono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value:
             return RVCInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value:
             return RVCInferencerv2().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value:
             return RVCInferencerv2Nono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUI
-            or inferencerType == EnumInferenceTypes.pyTorchWebUI.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value:
             return WebUIInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUINono
-            or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value:
             return WebUIInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVC
-            or inferencerType == EnumInferenceTypes.onnxRVC.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value:
             return OnnxRVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVCNono
-            or inferencerType == EnumInferenceTypes.onnxRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value:
             return OnnxRVCInferencerNono().loadModel(file, gpu)
         else:
             raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
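The chain above dispatches by comparing inferencerType against each EnumInferenceTypes member and its .value. A table-driven equivalent is sketched below as an illustration only; it is not code from the repository and assumes the module's existing imports of EnumInferenceTypes, Inferencer, and the inferencer classes:

```python
# Hypothetical sketch: map each EnumInferenceTypes member to its inferencer class.
INFERENCER_CLASSES = {
    EnumInferenceTypes.pyTorchRVC: RVCInferencer,
    EnumInferenceTypes.pyTorchRVCNono: RVCInferencerNono,
    EnumInferenceTypes.pyTorchRVCv2: RVCInferencerv2,
    EnumInferenceTypes.pyTorchRVCv2Nono: RVCInferencerv2Nono,
    EnumInferenceTypes.pyTorchWebUI: WebUIInferencer,
    EnumInferenceTypes.pyTorchWebUINono: WebUIInferencerNono,
    EnumInferenceTypes.onnxRVC: OnnxRVCInferencer,
    EnumInferenceTypes.onnxRVCNono: OnnxRVCInferencerNono,
}


def buildInferencer_sketch(inferencerType, file: str, gpu: int) -> Inferencer:
    for member, cls in INFERENCER_CLASSES.items():
        # Accept either the enum member or its raw string value, as the chain above does.
        if inferencerType == member or inferencerType == member.value:
            return cls().loadModel(file, gpu)
    raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
```

With this layout, supporting a new inferencer type only means adding one entry to the table rather than another elif branch.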