voice-changer/server/voice_changer/VoiceChangerManager.py

import numpy as np
from voice_changer.VoiceChanger import VoiceChanger
from const import ModelType
from voice_changer.utils.LoadModelParams import LoadModelParams
from voice_changer.utils.VoiceChangerModel import AudioInOut
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from dataclasses import dataclass, asdict
import torch


@dataclass()
class GPUInfo:
    id: int
    name: str
    memory: int


@dataclass()
class VoiceChangerManagerSettings:
    dummy: int
    # intData: list[str] = field(default_factory=lambda: ["slotIndex"])


class VoiceChangerManager(object):
    _instance = None

    def __init__(self, params: VoiceChangerParams):
        self.voiceChanger: VoiceChanger = None
        self.settings: VoiceChangerManagerSettings = VoiceChangerManagerSettings(dummy=0)
        # Collect static information (available GPUs).
        self.gpus: list[GPUInfo] = self._get_gpuInfos()

    def _get_gpuInfos(self):
        devCount = torch.cuda.device_count()
        gpus = []
        for id in range(devCount):
            name = torch.cuda.get_device_name(id)
            memory = torch.cuda.get_device_properties(id).total_memory
            gpu = {"id": id, "name": name, "memory": memory}
            gpus.append(gpu)
        return gpus

    @classmethod
    def get_instance(cls, params: VoiceChangerParams):
        if cls._instance is None:
            cls._instance = cls(params)
            cls._instance.voiceChanger = VoiceChanger(params)
        return cls._instance

    def loadModel(self, props: LoadModelParams):
        info = self.voiceChanger.loadModel(props)
        # info is a dict, so check for the key rather than an attribute.
        if "status" in info and info["status"] == "NG":
            return info
        else:
            info["status"] = "OK"
            return info

    def get_info(self):
        data = asdict(self.settings)
        data["gpus"] = self.gpus
        data["status"] = "OK"
        if hasattr(self, "voiceChanger"):
            info = self.voiceChanger.get_info()
            data.update(info)
            return data
        else:
            return {"status": "ERROR", "msg": "no model loaded"}

    def get_performance(self):
        if hasattr(self, "voiceChanger"):
            info = self.voiceChanger.get_performance()
            return info
        else:
            return {"status": "ERROR", "msg": "no model loaded"}

    def update_settings(self, key: str, val: str | int | float):
        if hasattr(self, "voiceChanger"):
            self.voiceChanger.update_settings(key, val)
        else:
            return {"status": "ERROR", "msg": "no model loaded"}
        return self.get_info()

    def changeVoice(self, receivedData: AudioInOut):
        if hasattr(self, "voiceChanger"):
            return self.voiceChanger.on_request(receivedData)
        else:
            print("Voice Change is not loaded. Did you load a correct model?")
            return np.zeros(1).astype(np.int16), []

    def switchModelType(self, modelType: ModelType):
        return self.voiceChanger.switchModelType(modelType)

    def getModelType(self):
        return self.voiceChanger.getModelType()

    def export2onnx(self):
        return self.voiceChanger.export2onnx()

    def merge_models(self, request: str):
        return self.voiceChanger.merge_models(request)

    def update_model_default(self):
        return self.voiceChanger.update_model_default()

    def update_model_info(self, newData: str):
        return self.voiceChanger.update_model_info(newData)

    def upload_model_assets(self, params: str):
        return self.voiceChanger.upload_model_assets(params)
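

# Usage sketch (not part of the original file): a minimal, hedged example of how
# the server layer typically drives this manager: obtain the singleton, load a
# model, then feed audio chunks through changeVoice(). Here `params`, `props`,
# and `chunk` are hypothetical placeholders for a VoiceChangerParams, a
# LoadModelParams, and an AudioInOut (int16 numpy array) constructed elsewhere.
#
#     manager = VoiceChangerManager.get_instance(params)
#     info = manager.loadModel(props)                # dict carrying a "status" field
#     audio_out, perf = manager.changeVoice(chunk)   # converted audio plus timing info,
#                                                    # matching the fallback return shape above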