Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 13:35:12 +03:00)
bugfix: sample download
This commit is contained in:
parent 56ee649196
commit 9c56579df1
@@ -106,8 +106,8 @@ def getSampleJsonAndModelIds(mode: RVCSampleMode):
         ], [
             ("Tsukuyomi-chan_o", {"useIndex": False}),
             ("Amitaro_o", {"useIndex": False}),
-            ("KikotoMahiro_o", {"useIndex": False}),
-            ("TokinaShigure_o", {"useIndex": True}),
+            # ("KikotoMahiro_o", {"useIndex": False}),
+            # ("TokinaShigure_o", {"useIndex": True}),
         ]
     elif mode == "testOfficial":
         return [
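This hunk comments out the KikotoMahiro_o and TokinaShigure_o entries, removing them from the default download set (presumably because their downloads were failing; the commit message says "bugfix: sample download"). Each entry pairs a sample ID with per-model options. A minimal sketch of how such `(sampleId, params)` tuples can be unpacked — loop names are illustrative, not the repository's exact code:

```python
from typing import Tuple

# The two entries that remain active after this hunk; "useIndex" presumably
# controls whether a feature-index file is fetched with the model
# (an assumption based on the flag name).
sampleModelIds: list[Tuple[str, dict]] = [
    ("Tsukuyomi-chan_o", {"useIndex": False}),
    ("Amitaro_o", {"useIndex": False}),
]

for sampleId, params in sampleModelIds:
    suffix = "with index" if params.get("useIndex") else "without index"
    print(f"{sampleId}: {suffix}")
```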
@@ -76,7 +76,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
     for i, initSampleId in enumerate(sampleModelIds):
         targetSampleId = initSampleId[0]
         targetSampleParams = initSampleId[1]
-        tagetSlotIndex = slotIndex[i]
+        targetSlotIndex = slotIndex[i]
 
         # Search for the initial sample
         match = False
@@ -89,7 +89,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             continue
 
         # When a match is found...
-        slotDir = os.path.join(model_dir, str(tagetSlotIndex))
+        slotDir = os.path.join(model_dir, str(targetSlotIndex))
         if sample.voiceChangerType == "RVC":
             slotInfo: RVCModelSlot = RVCModelSlot()
 
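The rename also touches the slot directory path: each downloaded sample is stored under a folder named after its slot index. A small sketch of that path construction, assuming `model_dir` is the configured model root:

```python
import os

model_dir = "model_dir"  # assumed root; the real value comes from the server settings
targetSlotIndex = 3

# Same construction as the slotDir line in the hunk: one numbered folder per slot.
slotDir = os.path.join(model_dir, str(targetSlotIndex))
os.makedirs(slotDir, exist_ok=True)  # hypothetical here; the repo may create it elsewhere
print(slotDir)  # -> model_dir/3 (model_dir\3 on Windows)
```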
@@ -147,7 +147,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             slotInfo.defaultIndexRatio = 1
             slotInfo.defaultProtect = 0.5
             slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
-            modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
         else:
             print(f"[Voice Changer] {sample.voiceChangerType} is not supported.")
 
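This hunk populates slot metadata before saving. As a hedged sketch, the fields visible in the diff could be modeled as a dataclass like the one below; the real RVCModelSlot defines more fields, and this subset reflects only what the hunk shows:

```python
from dataclasses import dataclass


@dataclass
class RVCModelSlotSketch:
    # Only the fields visible in this diff; the real class has more.
    modelFile: str = ""
    defaultIndexRatio: float = 1.0
    defaultProtect: float = 0.5
    isONNX: bool = False


slot = RVCModelSlotSketch(modelFile="amitaro.onnx")
slot.isONNX = slot.modelFile.endswith(".onnx")  # same check as in the hunk
print(slot)
```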
@@ -169,4 +169,4 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
         _setInfoByONNX(slotInfo)
     else:
         _setInfoByPytorch(slotInfo)
-    modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+    modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
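With this hunk, every assignment and use of the slot index agrees on the spelling targetSlotIndex. Whether the old spelling caused a runtime error or was only cosmetic is not visible in the diff alone, but the rename matters because Python binds names at runtime: if an assignment and a later use disagree, the module imports fine and then raises NameError when executed. A toy example, not the repository's code:

```python
def save(index: int) -> None:
    print("saving slot", index)


def buggy(slotIndex: list[int]) -> None:
    for i in range(len(slotIndex)):
        tagetSlotIndex = slotIndex[i]  # misspelled binding (the pre-fix spelling)
        save(targetSlotIndex)          # NameError: name 'targetSlotIndex' is not defined


# buggy([0, 1])  # uncommenting this raises NameError on the first iteration
```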
@@ -30,45 +30,22 @@ class InferencerManager:
         file: str,
         gpu: int,
     ) -> Inferencer:
-        if (
-            inferencerType == EnumInferenceTypes.pyTorchRVC
-            or inferencerType == EnumInferenceTypes.pyTorchRVC.value
-        ):
+        print("inferencerTypeinferencerTypeinferencerTypeinferencerType", inferencerType)
+        if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value:
             return RVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCNono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value:
             return RVCInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value:
             return RVCInferencerv2().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value:
             return RVCInferencerv2Nono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUI
-            or inferencerType == EnumInferenceTypes.pyTorchWebUI.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value:
             return WebUIInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUINono
-            or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value:
             return WebUIInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVC
-            or inferencerType == EnumInferenceTypes.onnxRVC.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value:
             return OnnxRVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVCNono
-            or inferencerType == EnumInferenceTypes.onnxRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value:
             return OnnxRVCInferencerNono().loadModel(file, gpu)
         else:
             raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
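The rewritten chain still compares inferencerType against both the enum member and its string .value on every branch, so callers can pass either form. A hedged alternative (a sketch, not the repository's code) normalizes the input once via the Enum value lookup instead of repeating the double comparison:

```python
from enum import Enum


class EnumInferenceTypes(Enum):
    # Two stand-in members; the repository's enum defines eight inferencer types.
    pyTorchRVC = "pyTorchRVC"
    onnxRVC = "onnxRVC"


def normalize(inferencerType) -> EnumInferenceTypes:
    """Accept an enum member or its string value, like the if/elif chain above."""
    if isinstance(inferencerType, EnumInferenceTypes):
        return inferencerType
    return EnumInferenceTypes(inferencerType)  # raises ValueError for unknown strings


print(normalize("pyTorchRVC"))                # EnumInferenceTypes.pyTorchRVC
print(normalize(EnumInferenceTypes.onnxRVC))  # EnumInferenceTypes.onnxRVC
```

With the input normalized, dispatch could become a single dict lookup from enum member to inferencer class, which would also make the final "Inferencer not found" error a plain KeyError handler.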