# voice-changer/server/voice_changer/RVC/RVC.py

import sys
import os

from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor
from voice_changer.RVC.pitchExtractor.PitchExtractorManager import PitchExtractorManager

# avoiding parse arg error in RVC
sys.argv = ["MMVCServerSIO.py"]

if sys.platform.startswith("darwin"):
    baseDir = [x for x in sys.path if x.endswith("Contents/MacOS")]
    if len(baseDir) != 1:
        print("baseDir should be only one ", baseDir)
        sys.exit()
    modulePath = os.path.join(baseDir[0], "RVC")
    sys.path.append(modulePath)
else:
    sys.path.append("RVC")

import json
import resampy

from voice_changer.RVC.MergeModel import merge_model
from voice_changer.RVC.MergeModelRequest import MergeModelRequest
from voice_changer.RVC.ModelSlotGenerator import generateModelSlot
from Exceptions import NoModeLoadedException
from voice_changer.RVC.RVCSettings import RVCSettings
from voice_changer.RVC.embedder.Embedder import Embedder
from voice_changer.RVC.embedder.EmbedderManager import EmbedderManager
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from voice_changer.RVC.inferencer.InferencerManager import InferencerManager
from voice_changer.utils.LoadModelParams import FilePaths, LoadModelParams
from voice_changer.utils.VoiceChangerModel import AudioInOut
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams

from dataclasses import asdict
from typing import cast
import numpy as np
import torch

# from fairseq import checkpoint_utils
import traceback
import faiss

from const import TMP_DIR, UPLOAD_DIR

from voice_changer.RVC.custom_vc_infer_pipeline import VC

providers = [
    "OpenVINOExecutionProvider",
    "CUDAExecutionProvider",
    "DmlExecutionProvider",
    "CPUExecutionProvider",
]
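
# NOTE: preference order of ONNX Runtime execution providers. A session would
# typically be created along these lines (illustrative sketch, not necessarily
# the actual call site, which lives elsewhere in this repo):
#   import onnxruntime
#   session = onnxruntime.InferenceSession(model_path, providers=providers)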


class RVC:
    audio_buffer: AudioInOut | None = None
    embedder: Embedder | None = None
    inferencer: Inferencer | None = None
    pitchExtractor: PitchExtractor | None = None

    def __init__(self, params: VoiceChangerParams):
        self.initialLoad = True
        self.settings = RVCSettings()
        self.pitchExtractor = PitchExtractorManager.getPitchExtractor(
            self.settings.f0Detector
        )
        self.feature_file = None
        self.index_file = None
        self.gpu_num = torch.cuda.device_count()
        self.prevVol = 0
        self.params = params
        self.mps_enabled: bool = (
            getattr(torch.backends, "mps", None) is not None
            and torch.backends.mps.is_available()
        )
        self.currentSlot = -1
        print("RVC initialization: ", params)
        print("mps: ", self.mps_enabled)

    def loadModel(self, props: LoadModelParams):
        """
        loadModel only registers the model in a slot (it is not loaded for
        inference). As an exception it is loaded when nothing has been loaded
        for inference yet, or when the target slot is the active slot.
        """
        self.is_half = props.isHalf
        target_slot_idx = props.slot
        params_str = props.params
        params = json.loads(params_str)

        modelSlot = generateModelSlot(props.files, params)
        self.settings.modelSlots[target_slot_idx] = modelSlot
        print(
            f"[Voice Changer] RVC new model is uploaded,{target_slot_idx}",
            asdict(modelSlot),
        )

        # Load only on the initial call (or when the active slot is replaced).
        if self.initialLoad or target_slot_idx == self.currentSlot:
            self.prepareModel(target_slot_idx)
            self.settings.modelSlotIndex = target_slot_idx
            # self.currentSlot = self.settings.modelSlotIndex
            self.switchModel()
            self.initialLoad = False

        return self.get_info()
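
    # Slot lifecycle (sketch): loadModel() registers a model in a slot,
    # prepareModel() stages it on the self.next_* attributes, and
    # switchModel() promotes the staged objects to the active ones that
    # inference() uses.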

    def _getDevice(self):
        if self.settings.gpu < 0 or (self.gpu_num == 0 and self.mps_enabled is False):
            dev = torch.device("cpu")
        elif self.mps_enabled:
            dev = torch.device("mps")
        else:
            dev = torch.device("cuda", index=self.settings.gpu)
        return dev
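
    # Note: when MPS is available it takes precedence over CUDA here, and the
    # gpu index setting is ignored for the MPS device.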

    def prepareModel(self, slot: int):
        if slot < 0:
            return self.get_info()
        print("[Voice Changer] Prepare Model of slot:", slot)
        modelSlot = self.settings.modelSlots[slot]
        filename = (
            modelSlot.onnxModelFile if modelSlot.isONNX else modelSlot.pyTorchModelFile
        )
        dev = self._getDevice()

        # Load the inferencer.
        inferencer = InferencerManager.getInferencer(
            modelSlot.modelType,
            filename,
            self.settings.isHalf,
            dev,
        )
        self.next_inferencer = inferencer

        # Load the index.
        print("[Voice Changer] Loading index...")
        if modelSlot.featureFile is not None and modelSlot.indexFile is not None:
            if (
                os.path.exists(modelSlot.featureFile) is True
                and os.path.exists(modelSlot.indexFile) is True
            ):
                try:
                    self.next_index = faiss.read_index(modelSlot.indexFile)
                    self.next_feature = np.load(modelSlot.featureFile)
                except Exception:
                    print("[Voice Changer] load index failed. Use no index.")
                    traceback.print_exc()
                    self.next_index = self.next_feature = None
            else:
                print("[Voice Changer] Index file is not found. Use no index.")
                self.next_index = self.next_feature = None
        else:
            self.next_index = self.next_feature = None

        self.next_trans = modelSlot.defaultTrans
        self.next_samplingRate = modelSlot.samplingRate
        self.next_embedder = modelSlot.embedder
        self.next_framework = "ONNX" if modelSlot.isONNX else "PyTorch"
        print("[Voice Changer] Prepare done.")
        return self.get_info()
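
    # The faiss index plus the saved feature matrix back the optional
    # retrieval step of the pipeline: nearest training features are looked up
    # for each embedder frame and blended in by indexRatio, roughly (sketch):
    #   _, ids = index.search(feat, k=1)
    #   feat = index_rate * feature[ids.squeeze()] + (1 - index_rate) * feat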

    def switchModel(self):
        print("[Voice Changer] Switching model..")
        dev = self._getDevice()

        # The embedder can most likely be reused regardless of the model, so
        # it is fetched here at switch time.
        try:
            self.embedder = EmbedderManager.getEmbedder(
                self.next_embedder,
                self.params.hubert_base,
                True,
                dev,
            )
        except Exception as e:
            print("[Voice Changer] load hubert error", e)
            traceback.print_exc()

        self.inferencer = self.next_inferencer
        self.feature = self.next_feature
        self.index = self.next_index
        self.settings.tran = self.next_trans
        self.settings.framework = self.next_framework
        self.settings.modelSamplingRate = self.next_samplingRate

        self.next_net_g = None
        self.next_onnx_session = None
        print(
            "[Voice Changer] Switching model..done",
        )

    def update_settings(self, key: str, val: int | float | str):
        # if key == "onnxExecutionProvider" and self.onnx_session is not None:
        #     if val == "CUDAExecutionProvider":
        #         if self.settings.gpu < 0 or self.settings.gpu >= self.gpu_num:
        #             self.settings.gpu = 0
        #         provider_options = [{"device_id": self.settings.gpu}]
        #         self.onnx_session.set_providers(
        #             providers=[val], provider_options=provider_options
        #         )
        #         if hasattr(self, "hubert_onnx"):
        #             self.hubert_onnx.set_providers(
        #                 providers=[val], provider_options=provider_options
        #             )
        #     else:
        #         self.onnx_session.set_providers(providers=[val])
        #         if hasattr(self, "hubert_onnx"):
        #             self.hubert_onnx.set_providers(providers=[val])
        # elif key == "onnxExecutionProvider" and self.onnx_session is None:
        #     print("Onnx is not enabled. Please load model.")
        #     return False
        if key in self.settings.intData:
            val = cast(int, val)
            # if (
            #     key == "gpu"
            #     and val >= 0
            #     and val < self.gpu_num
            #     and self.onnx_session is not None
            # ):
            #     providers = self.onnx_session.get_providers()
            #     print("Providers:", providers)
            #     if "CUDAExecutionProvider" in providers:
            #         provider_options = [{"device_id": self.settings.gpu}]
            #         self.onnx_session.set_providers(
            #             providers=["CUDAExecutionProvider"],
            #             provider_options=provider_options,
            #         )
            if key == "modelSlotIndex":
                if int(val) < 0:
                    return True
                # self.switchModel(int(val))
                val = int(val) % 1000  # Quick hack for when the same slot is selected again
                self.prepareModel(val)
                self.currentSlot = -1
            setattr(self.settings, key, int(val))
        elif key in self.settings.floatData:
            setattr(self.settings, key, float(val))
        elif key in self.settings.strData:
            setattr(self.settings, key, str(val))
        else:
            return False
        return True

    def get_info(self):
        data = asdict(self.settings)
        # data["onnxExecutionProviders"] = (
        #     self.onnx_session.get_providers() if self.onnx_session is not None else []
        # )
        files = ["configFile", "pyTorchModelFile", "onnxModelFile"]
        for f in files:
            if data[f] is not None and os.path.exists(data[f]):
                data[f] = os.path.basename(data[f])
            else:
                data[f] = ""
        return data

    def get_processing_sampling_rate(self):
        return self.settings.modelSamplingRate

    def generate_input(
        self,
        newData: AudioInOut,
        inputSize: int,
        crossfadeSize: int,
        solaSearchFrame: int = 0,
    ):
        newData = newData.astype(np.float32) / 32768.0

        if self.audio_buffer is not None:
            # Concatenate with the previously buffered data.
            self.audio_buffer = np.concatenate([self.audio_buffer, newData], 0)
        else:
            self.audio_buffer = newData

        convertSize = (
            inputSize + crossfadeSize + solaSearchFrame + self.settings.extraConvertSize
        )

        # The model output is truncated to its hop size, so pad convertSize up
        # to a multiple of 128 to compensate.
        if convertSize % 128 != 0:
            convertSize = convertSize + (128 - (convertSize % 128))
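        # Worked example (illustrative numbers): inputSize=4096,
        # crossfadeSize=2048, solaSearchFrame=0, extraConvertSize=4096 gives
        # convertSize=10240, already a multiple of 128; a convertSize of 10300
        # would be padded up to 10368.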

        convertOffset = -1 * convertSize
        self.audio_buffer = self.audio_buffer[convertOffset:]  # Keep only the region to convert.

        # Crop just the output region and check its volume. (TODO: make the muting gradual.)
        cropOffset = -1 * (inputSize + crossfadeSize)
        cropEnd = -1 * (crossfadeSize)
        crop = self.audio_buffer[cropOffset:cropEnd]
        rms = np.sqrt(np.square(crop).mean(axis=0))
        vol = max(rms, self.prevVol * 0.0)
        self.prevVol = vol

        return (self.audio_buffer, convertSize, vol)

    def _onnx_inference(self, data):
        if hasattr(self, "onnx_session") is False or self.onnx_session is None:
            print("[Voice Changer] No onnx session.")
            raise NoModeLoadedException("ONNX")

        if self.settings.gpu < 0 or self.gpu_num == 0:
            dev = torch.device("cpu")
        else:
            dev = torch.device("cuda", index=self.settings.gpu)
        # self.hubert_model = self.hubert_model.to(dev)
        self.embedder = self.embedder.to(dev)

        audio = data[0]
        convertSize = data[1]
        vol = data[2]
        audio = resampy.resample(audio, self.settings.modelSamplingRate, 16000)

        if vol < self.settings.silentThreshold:
            return np.zeros(convertSize).astype(np.int16)

        with torch.no_grad():
            repeat = 3 if self.is_half else 1
            repeat *= self.settings.rvcQuality  # 0 or 3
            vc = VC(
                self.settings.modelSamplingRate,
                dev,
                self.is_half,
                repeat,
            )
            sid = 0
            f0_up_key = self.settings.tran
            f0_method = self.settings.f0Detector
            index_rate = self.settings.indexRatio
            if_f0 = 1 if self.settings.modelSlots[self.currentSlot].f0 else 0
            embChannels = self.settings.modelSlots[self.currentSlot].embChannels
            audio_out = vc.pipeline(
                # self.hubert_model,
                self.embedder,
                self.onnx_session,
                self.pitchExtractor,
                sid,
                audio,
                f0_up_key,
                f0_method,
                self.index,
                self.feature,
                index_rate,
                if_f0,
                silence_front=self.settings.extraConvertSize
                / self.settings.modelSamplingRate,
                embChannels=embChannels,
            )

        result = audio_out * np.sqrt(vol)
        return result

    def _pyTorch_inference(self, data):
        # if hasattr(self, "net_g") is False or self.net_g is None:
        #     print(
        #         "[Voice Changer] No pyTorch session.",
        #         hasattr(self, "net_g"),
        #         self.net_g,
        #     )
        #     raise NoModeLoadedException("pytorch")

        dev = self._getDevice()

        self.embedder = self.embedder.to(dev)
        self.inferencer = self.inferencer.to(dev)
        # self.embedder.printDevice()
        # self.inferencer.printDevice()

        audio = data[0]
        convertSize = data[1]
        vol = data[2]
        audio = resampy.resample(audio, self.settings.modelSamplingRate, 16000)

        if vol < self.settings.silentThreshold:
            return np.zeros(convertSize).astype(np.int16)

        with torch.no_grad():
            repeat = 3 if self.is_half else 1
            repeat *= self.settings.rvcQuality  # 0 or 3
            vc = VC(self.settings.modelSamplingRate, dev, self.is_half, repeat)
            sid = 0
            f0_up_key = self.settings.tran
            f0_method = self.settings.f0Detector
            index_rate = self.settings.indexRatio
            if_f0 = 1 if self.settings.modelSlots[self.currentSlot].f0 else 0
            embChannels = self.settings.modelSlots[self.currentSlot].embChannels
            audio_out = vc.pipeline(
                self.embedder,
                self.inferencer,
                self.pitchExtractor,
                sid,
                audio,
                f0_up_key,
                f0_method,
                self.index,
                self.feature,
                index_rate,
                if_f0,
                silence_front=self.settings.extraConvertSize
                / self.settings.modelSamplingRate,
                embChannels=embChannels,
            )

        result = audio_out * np.sqrt(vol)

        return result
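
    # The sqrt(vol) scaling above makes the converted audio roughly track the
    # loudness of the input block; the same scaling is applied in the ONNX
    # path.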

    def inference(self, data):
        if self.settings.modelSlotIndex < 0:
            print(
                "[Voice Changer] wait for loading model...",
                self.settings.modelSlotIndex,
                self.currentSlot,
            )
            raise NoModeLoadedException("model_common")

        if self.currentSlot != self.settings.modelSlotIndex:
            print(f"Switch model {self.currentSlot} -> {self.settings.modelSlotIndex}")
            self.currentSlot = self.settings.modelSlotIndex
            self.switchModel()

        if self.settings.framework == "ONNX":
            audio = self._onnx_inference(data)
        else:
            audio = self._pyTorch_inference(data)

        return audio
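
    # Typical call order from the surrounding voice-changer loop (sketch; the
    # actual driver lives outside this class):
    #   data = rvc.generate_input(block, inputSize, crossfadeSize, sola)
    #   out = rvc.inference(data)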

    def __del__(self):
        # These attributes may never have been set, so guard the deletes.
        if hasattr(self, "net_g"):
            del self.net_g
        if hasattr(self, "onnx_session"):
            del self.onnx_session

        print("---------- REMOVING ---------------")

        remove_path = os.path.join("RVC")
        sys.path = [x for x in sys.path if x.endswith(remove_path) is False]

        for key in list(sys.modules):
            val = sys.modules.get(key)
            try:
                file_path = val.__file__
                if file_path.find("RVC" + os.path.sep) >= 0:
                    print("remove", key, file_path)
                    sys.modules.pop(key)
            except Exception:  # type: ignore
                # print(e)
                pass

    def export2onnx(self):
        if hasattr(self, "net_g") is False or self.net_g is None:
            print("[Voice Changer] export2onnx, No pyTorch session.")
            return {"status": "ng", "path": ""}

        pyTorchModelFile = self.settings.modelSlots[
            self.settings.modelSlotIndex
        ].pyTorchModelFile  # Use the selected slot, not currentSlot, so export works before inference.

        if pyTorchModelFile is None:
            print("[Voice Changer] export2onnx, No pyTorch filepath.")
            return {"status": "ng", "path": ""}

        import voice_changer.RVC.export2onnx as onnxExporter

        output_file = os.path.splitext(os.path.basename(pyTorchModelFile))[0] + ".onnx"
        output_file_simple = (
            os.path.splitext(os.path.basename(pyTorchModelFile))[0] + "_simple.onnx"
        )
        output_path = os.path.join(TMP_DIR, output_file)
        output_path_simple = os.path.join(TMP_DIR, output_file_simple)
        print(
            "embChannels",
            self.settings.modelSlots[self.settings.modelSlotIndex].embChannels,
        )
        metadata = {
            "application": "VC_CLIENT",
            "version": "1",
            "modelType": self.settings.modelSlots[
                self.settings.modelSlotIndex
            ].modelType,
            "samplingRate": self.settings.modelSlots[
                self.settings.modelSlotIndex
            ].samplingRate,
            "f0": self.settings.modelSlots[self.settings.modelSlotIndex].f0,
            "embChannels": self.settings.modelSlots[
                self.settings.modelSlotIndex
            ].embChannels,
            "embedder": self.settings.modelSlots[self.settings.modelSlotIndex].embedder,
        }

        if torch.cuda.device_count() > 0:
            onnxExporter.export2onnx(
                pyTorchModelFile, output_path, output_path_simple, True, metadata
            )
        else:
            print(
                "[Voice Changer] Warning!!! ONNX export with float32; the file size may be doubled."
            )
            onnxExporter.export2onnx(
                pyTorchModelFile, output_path, output_path_simple, False, metadata
            )

        return {
            "status": "ok",
            "path": f"/tmp/{output_file_simple}",
            "filename": output_file_simple,
        }
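
    # The returned "path" appears to assume that TMP_DIR is exposed to clients
    # under /tmp by the serving layer, which then offers the simplified ONNX
    # file for download.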

    def merge_models(self, request: str):
        print("[Voice Changer] MergeRequest:", request)
        req: MergeModelRequest = MergeModelRequest.from_json(request)
        merged = merge_model(req)

        targetSlot = 0
        if req.slot < 0:
            targetSlot = len(self.settings.modelSlots) - 1
        else:
            targetSlot = req.slot

        storeDir = os.path.join(UPLOAD_DIR, f"{targetSlot}")
        print("[Voice Changer] store merged model to:", storeDir)
        os.makedirs(storeDir, exist_ok=True)
        storeFile = os.path.join(storeDir, "merged.pth")
        torch.save(merged, storeFile)

        filePaths: FilePaths = FilePaths(
            pyTorchModelFilename=storeFile,
            configFilename=None,
            onnxModelFilename=None,
            featureFilename=None,
            indexFilename=None,
            clusterTorchModelFilename=None,
        )
        params = {"trans": req.defaultTrans}
        props: LoadModelParams = LoadModelParams(
            slot=targetSlot, isHalf=True, files=filePaths, params=json.dumps(params)
        )
        self.loadModel(props)
        self.prepareModel(targetSlot)
        self.settings.modelSlotIndex = targetSlot
        self.currentSlot = self.settings.modelSlotIndex
        # self.settings.tran = req.defaultTrans