import json
import threading
from dataclasses import dataclass, asdict, field

import numpy as np
import torch

from const import MAX_SLOT_NUM, VoiceChangerType
from data.ModelSample import ModelSamples
from data.ModelSlot import ModelSlots, loadSlotInfo
from utils.downloader.SampleDownloader import downloadSample, getSampleInfos
from voice_changer.Local.AudioDeviceList import ServerAudioDevice, list_audio_device
from voice_changer.Local.ServerDevice import ServerDevice
from voice_changer.RVC.ModelSlotGenerator import setSlotAsRVC
from voice_changer.VoiceChanger import VoiceChanger
from voice_changer.utils.VoiceChangerModel import AudioInOut
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams


@dataclass()
class GPUInfo:
    id: int
    name: str
    memory: int


@dataclass()
class VoiceChangerManagerSettings:
    slotIndex: int
    intData: list[str] = field(default_factory=lambda: ["slotIndex"])


class VoiceChangerManager(object):
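    """Singleton facade that owns the active VoiceChanger instance.

    It collects slot, GPU, and audio-device information, loads models into
    slots, and forwards setting updates and audio requests to the current model.
    """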
    _instance = None

    def __init__(self, params: VoiceChangerParams):
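        """Initialize manager state and start the server audio-device loop."""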
        self.voiceChanger: VoiceChanger = None
        self.settings: VoiceChangerManagerSettings = VoiceChangerManagerSettings(slotIndex=0)
        self.params: VoiceChangerParams = params

        self.serverDevice = ServerDevice()

        # Collect static information
        self.sampleModels: list[ModelSamples] = getSampleInfos(self.params.sample_mode)
        self.gpus: list[GPUInfo] = self._get_gpuInfos()

        audioinput, audiooutput = list_audio_device()
        self.serverAudioInputDevices: list[ServerAudioDevice] = audioinput
        self.serverAudioOutputDevices: list[ServerAudioDevice] = audiooutput

        # ServerDevice: run the local server-device loop in a background thread.
        thread = threading.Thread(target=self.serverDevice.serverLocal, args=(self,))
        thread.start()

    def _get_gpuInfos(self):
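        """Enumerate available CUDA devices as id/name/total-memory dicts."""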
        devCount = torch.cuda.device_count()
        gpus = []
        for id in range(devCount):
            name = torch.cuda.get_device_name(id)
            memory = torch.cuda.get_device_properties(id).total_memory
            gpu = {"id": id, "name": name, "memory": memory}
            gpus.append(gpu)
        return gpus

    @classmethod
    def get_instance(cls, params: VoiceChangerParams):
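        """Return the singleton manager, creating it and its VoiceChanger on first call."""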
        if cls._instance is None:
            cls._instance = cls(params)

            gpu_num = torch.cuda.device_count()
            mps_enabled: bool = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
            print(f"VoiceChanger Initialized (GPU_NUM:{gpu_num}, mps_enabled:{mps_enabled})")

            cls._instance.voiceChanger = VoiceChanger(params, cls._instance.settings.slotIndex)
            cls._instance.voiceChanger.prepareModel()
        return cls._instance

    def loadModel(self, slot: int, voiceChangerType: VoiceChangerType, params: str):
        print(slot, voiceChangerType, params)
        paramDict = json.loads(params)
        if voiceChangerType == "RVC":
            if "sampleId" in paramDict and len(paramDict["sampleId"]) > 0:
                print("[Voice Changer]: Download RVC sample.")
                downloadSample(self.params.sample_mode, paramDict["sampleId"], self.params.model_dir, slot, {"useIndex": paramDict["rvcIndexDownload"]})
            else:
                print("[Voice Changer]: Set uploaded RVC model to slot.")
                setSlotAsRVC(self.params.model_dir, slot, paramDict)

        return self.get_info()

    def get_slotInfos(self):
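        """Load persisted slot info for every slot index up to MAX_SLOT_NUM."""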
        slotInfos: list[ModelSlots] = []
        for slotIndex in range(MAX_SLOT_NUM):
            slotInfo = loadSlotInfo(self.params.model_dir, slotIndex)
            slotInfos.append(slotInfo)
        return slotInfos

    def get_info(self):
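        """Build a status dict from settings, slots, GPUs, audio devices, and the active model."""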
        data = asdict(self.settings)
        slotInfos = self.get_slotInfos()
        data["slotInfos"] = slotInfos
        data["gpus"] = self.gpus
        data["sampleModels"] = self.sampleModels
        data["serverAudioInputDevices"] = self.serverAudioInputDevices
        data["serverAudioOutputDevices"] = self.serverAudioOutputDevices

        data["status"] = "OK"
        if hasattr(self, "voiceChanger"):
            info = self.voiceChanger.get_info()
            data.update(info)
            return data
        else:
            return {"status": "ERROR", "msg": "no model loaded"}

    def get_performance(self):
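        """Return performance metrics from the active VoiceChanger, if one is loaded."""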
        if hasattr(self, "voiceChanger"):
            info = self.voiceChanger.get_performance()
            return info
        else:
            return {"status": "ERROR", "msg": "no model loaded"}

    def update_settings(self, key: str, val: str | int | float):
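        """Apply a setting update; a slotIndex change rebuilds the VoiceChanger,
        other keys are delegated to the active model."""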
        if key in self.settings.intData:
            setattr(self.settings, key, int(val))
            if key == "slotIndex":
                val = val % 1000  # Quick hack for when the same slot is re-selected
                setattr(self.settings, key, int(val))

                newVoiceChanger = VoiceChanger(self.params, self.settings.slotIndex)
                newVoiceChanger.prepareModel()
                self.serverDevice.serverLocal(newVoiceChanger)
                del self.voiceChanger
                self.voiceChanger = newVoiceChanger
        elif hasattr(self, "voiceChanger"):
            self.voiceChanger.update_settings(key, val)
        else:
            print(f"[Voice Changer] update is not handled. ({key}:{val})")
        return self.get_info()

    def changeVoice(self, receivedData: AudioInOut):
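        """Convert incoming audio with the active model; return silence when no model is loaded."""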
        if hasattr(self, "voiceChanger"):
            return self.voiceChanger.on_request(receivedData)
        else:
            print("Voice Changer is not loaded. Did you load a correct model?")
            return np.zeros(1).astype(np.int16), []

    # The following methods delegate directly to the active VoiceChanger.
    def export2onnx(self):
        return self.voiceChanger.export2onnx()

    def merge_models(self, request: str):
        return self.voiceChanger.merge_models(request)

    def update_model_default(self):
        return self.voiceChanger.update_model_default()

    def update_model_info(self, newData: str):
        return self.voiceChanger.update_model_info(newData)

    def upload_model_assets(self, params: str):
        return self.voiceChanger.upload_model_assets(params)