# voice-changer/server/voice_changer/RVC/inferencer/InferencerManager.py
from const import EnumInferenceTypes
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from voice_changer.RVC.inferencer.OnnxRVCInferencer import OnnxRVCInferencer
from voice_changer.RVC.inferencer.OnnxRVCInferencerNono import OnnxRVCInferencerNono
from voice_changer.RVC.inferencer.RVCInferencer import RVCInferencer
from voice_changer.RVC.inferencer.RVCInferencerNono import RVCInferencerNono
from voice_changer.RVC.inferencer.RVCInferencerv2 import RVCInferencerv2
from voice_changer.RVC.inferencer.RVCInferencerv2Nono import RVCInferencerv2Nono
from voice_changer.RVC.inferencer.WebUIInferencer import WebUIInferencer
from voice_changer.RVC.inferencer.WebUIInferencerNono import WebUIInferencerNono
class InferencerManager:
    """Factory for RVC inferencer instances.

    Maps an :class:`EnumInferenceTypes` value to the concrete inferencer
    implementation, instantiates it, and loads the model file onto the
    requested GPU. The most recently created inferencer is kept in
    ``currentInferencer``.
    """

    # The inferencer created by the last getInferencer() call (None until then).
    currentInferencer: Inferencer | None = None

    # Dispatch table: inference type -> inferencer class. Replaces the former
    # eight-branch if/elif chain, so adding a new type is a one-line change.
    _INFERENCER_CLASSES = {
        EnumInferenceTypes.pyTorchRVC: RVCInferencer,
        EnumInferenceTypes.pyTorchRVCNono: RVCInferencerNono,
        EnumInferenceTypes.pyTorchRVCv2: RVCInferencerv2,
        EnumInferenceTypes.pyTorchRVCv2Nono: RVCInferencerv2Nono,
        EnumInferenceTypes.pyTorchWebUI: WebUIInferencer,
        EnumInferenceTypes.pyTorchWebUINono: WebUIInferencerNono,
        EnumInferenceTypes.onnxRVC: OnnxRVCInferencer,
        EnumInferenceTypes.onnxRVCNono: OnnxRVCInferencerNono,
    }

    @classmethod
    def getInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
    ) -> Inferencer:
        """Create an inferencer, remember it as current, and return it.

        Args:
            inferencerType: the kind of inferencer to build (enum member or
                its raw value — both are accepted, as before).
            file: path to the model file to load.
            gpu: GPU index passed through to ``loadModel``.

        Returns:
            The loaded inferencer (also stored in ``cls.currentInferencer``).

        Raises:
            RuntimeError: if ``inferencerType`` matches no known inferencer.
        """
        cls.currentInferencer = cls.loadInferencer(inferencerType, file, gpu)
        return cls.currentInferencer

    @classmethod
    def loadInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
    ) -> Inferencer:
        """Instantiate the inferencer for ``inferencerType`` and load the model.

        Raises:
            RuntimeError: if ``inferencerType`` matches no known inferencer.
        """
        # Normalize: callers historically passed either the enum member or its
        # raw .value; EnumInferenceTypes(value) covers both (a member passes
        # through unchanged, a raw value is looked up by value).
        try:
            key = EnumInferenceTypes(inferencerType)
        except ValueError:
            raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType) from None

        inferencerClass = cls._INFERENCER_CLASSES.get(key)
        if inferencerClass is None:
            raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
        return inferencerClass().loadModel(file, gpu)