Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-02-03 00:33:57 +03:00)
bugfix: sample download
commit 9c56579df1 (parent 56ee649196)
@@ -106,8 +106,8 @@ def getSampleJsonAndModelIds(mode: RVCSampleMode):
         ], [
             ("Tsukuyomi-chan_o", {"useIndex": False}),
             ("Amitaro_o", {"useIndex": False}),
-            ("KikotoMahiro_o", {"useIndex": False}),
-            ("TokinaShigure_o", {"useIndex": True}),
+            # ("KikotoMahiro_o", {"useIndex": False}),
+            # ("TokinaShigure_o", {"useIndex": True}),
         ]
     elif mode == "testOfficial":
         return [
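The hunk above disables two sample entries by commenting them out of the model-id list. For context, a minimal sketch of how such a list is consumed, assuming a hypothetical mode string and sample-definition file name (only the (sampleId, params) tuples come from the diff): an entry commented out here simply never reaches the download loop.

def getSampleJsonAndModelIds(mode: str):
    # Minimal sketch, not the project's actual code.
    if mode == "production":  # hypothetical mode value
        return [
            "samples.json",  # hypothetical sample-definition file
        ], [
            ("Tsukuyomi-chan_o", {"useIndex": False}),
            ("Amitaro_o", {"useIndex": False}),
            # Commented-out tuples drop out of the list entirely, so the
            # downloader never sees them.
            # ("KikotoMahiro_o", {"useIndex": False}),
            # ("TokinaShigure_o", {"useIndex": True}),
        ]
    return [], []

jsons, ids = getSampleJsonAndModelIds("production")
print(len(ids))  # 2: only the entries still present in the list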
@@ -76,7 +76,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
     for i, initSampleId in enumerate(sampleModelIds):
         targetSampleId = initSampleId[0]
         targetSampleParams = initSampleId[1]
-        tagetSlotIndex = slotIndex[i]
+        targetSlotIndex = slotIndex[i]

         # search for the initial sample
         match = False
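The rename from tagetSlotIndex to targetSlotIndex is applied consistently in this and the following hunks. A short illustration (hypothetical, not project code) of why a half-applied rename would fail at runtime: Python resolves names inside a function lazily, so a misspelled local binding only works as long as every use site shares the same misspelling.

def demo(slotIndex):
    tagetSlotIndex = slotIndex[0]  # misspelled binding still "works"...
    return str(targetSlotIndex)    # ...until a correctly spelled use site is hit

try:
    demo([3])
except NameError as e:
    print(e)  # name 'targetSlotIndex' is not defined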
@@ -89,7 +89,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             continue

         # once a matching sample is found...
-        slotDir = os.path.join(model_dir, str(tagetSlotIndex))
+        slotDir = os.path.join(model_dir, str(targetSlotIndex))
         if sample.voiceChangerType == "RVC":
             slotInfo: RVCModelSlot = RVCModelSlot()

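The slot directory is built by joining the model directory with the stringified slot index. A small runnable sketch (the model_dir value is a placeholder): os.path.join accepts only str, bytes, or os.PathLike components, which is why the integer index is wrapped in str().

import os

model_dir = "model_dir"  # placeholder; the real value comes from configuration
targetSlotIndex = 3

slotDir = os.path.join(model_dir, str(targetSlotIndex))
print(slotDir)  # "model_dir/3" on POSIX, "model_dir\3" on Windows

# Passing the raw int would raise TypeError: join components must be
# str, bytes, or os.PathLike, not int.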
@@ -147,7 +147,7 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             slotInfo.defaultIndexRatio = 1
             slotInfo.defaultProtect = 0.5
             slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
-            modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
         else:
             print(f"[Voice Changer] {sample.voiceChangerType} is not supported.")

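This hunk shows the slot defaults and the extension-based ONNX check. A self-contained sketch, with a dataclass standing in for RVCModelSlot (the field names follow the diff; everything else is an assumption):

from dataclasses import dataclass

@dataclass
class SlotInfo:  # stand-in for RVCModelSlot; only these fields appear in the diff
    modelFile: str = ""
    defaultIndexRatio: float = 1
    defaultProtect: float = 0.5
    isONNX: bool = False

slot = SlotInfo(modelFile="model.onnx")  # placeholder file name
slot.isONNX = slot.modelFile.endswith(".onnx")
print(slot.isONNX)  # True: ONNX models are detected purely by file extension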
@@ -169,4 +169,4 @@ def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str
             _setInfoByONNX(slotInfo)
         else:
             _setInfoByPytorch(slotInfo)
-        modelSlotManager.save_model_slot(tagetSlotIndex, slotInfo)
+        modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
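Both this hunk and the one at line 147 end by persisting the slot via modelSlotManager.save_model_slot(targetSlotIndex, slotInfo). A minimal in-memory stand-in for that API (only the method name and argument order come from the diff; the real manager presumably writes slot metadata to disk):

class ModelSlotManager:  # hypothetical minimal shape, not the project's class
    def __init__(self):
        self._slots: dict[int, object] = {}

    def save_model_slot(self, slotIndex: int, slotInfo) -> None:
        # Keyed by numeric slot index, so re-saving a slot overwrites it.
        self._slots[slotIndex] = slotInfo

manager = ModelSlotManager()
manager.save_model_slot(0, {"modelFile": "model.onnx"})
print(manager._slots[0])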
@@ -30,45 +30,22 @@ class InferencerManager:
         file: str,
         gpu: int,
     ) -> Inferencer:
-        if (
-            inferencerType == EnumInferenceTypes.pyTorchRVC
-            or inferencerType == EnumInferenceTypes.pyTorchRVC.value
-        ):
+        print("inferencerTypeinferencerTypeinferencerTypeinferencerType", inferencerType)
+        if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value:
             return RVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCNono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCNono or inferencerType == EnumInferenceTypes.pyTorchRVCNono.value:
             return RVCInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2 or inferencerType == EnumInferenceTypes.pyTorchRVCv2.value:
             return RVCInferencerv2().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono
-            or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono or inferencerType == EnumInferenceTypes.pyTorchRVCv2Nono.value:
             return RVCInferencerv2Nono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUI
-            or inferencerType == EnumInferenceTypes.pyTorchWebUI.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUI or inferencerType == EnumInferenceTypes.pyTorchWebUI.value:
             return WebUIInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.pyTorchWebUINono
-            or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.pyTorchWebUINono or inferencerType == EnumInferenceTypes.pyTorchWebUINono.value:
             return WebUIInferencerNono().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVC
-            or inferencerType == EnumInferenceTypes.onnxRVC.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value:
             return OnnxRVCInferencer().loadModel(file, gpu)
-        elif (
-            inferencerType == EnumInferenceTypes.onnxRVCNono
-            or inferencerType == EnumInferenceTypes.onnxRVCNono.value
-        ):
+        elif inferencerType == EnumInferenceTypes.onnxRVCNono or inferencerType == EnumInferenceTypes.onnxRVCNono.value:
             return OnnxRVCInferencerNono().loadModel(file, gpu)
         else:
             raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
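The reshaped conditions keep the dual comparison against both the enum member and its .value, and that detail matters: a plain Python Enum member never compares equal to its underlying value, so a caller passing a raw string would otherwise fall through to the RuntimeError. A runnable sketch (the member names come from the diff; the string values and the two-branch dispatch are assumptions):

from enum import Enum

class EnumInferenceTypes(Enum):  # values assumed to mirror the member names
    pyTorchRVC = "pyTorchRVC"
    onnxRVC = "onnxRVC"

print(EnumInferenceTypes.pyTorchRVC == "pyTorchRVC")        # False: Enum != raw value
print(EnumInferenceTypes.pyTorchRVC.value == "pyTorchRVC")  # True

def resolve(inferencerType):
    # Mirrors the diff's pattern: accept either the member or its string value.
    if inferencerType == EnumInferenceTypes.pyTorchRVC or inferencerType == EnumInferenceTypes.pyTorchRVC.value:
        return "RVCInferencer"
    elif inferencerType == EnumInferenceTypes.onnxRVC or inferencerType == EnumInferenceTypes.onnxRVC.value:
        return "OnnxRVCInferencer"
    raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)

print(resolve("pyTorchRVC"))                # works with a raw string
print(resolve(EnumInferenceTypes.onnxRVC))  # and with the enum member

An alternative design would normalize the input once (for example, EnumInferenceTypes(inferencerType) for string inputs) and dispatch through a dict, but the commit keeps the explicit elif chain, only flattening each condition onto one line.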