mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 21:45:00 +03:00)

commit f2ba7e3938 ("update")
parent 7f1cdb9ddc
@@ -398,7 +398,7 @@ class RVC:
         import voice_changer.RVC.export2onnx as onnxExporter

-        output_file_simple = onnxExporter.export2onnx(modelSlot)
+        output_file_simple = onnxExporter.export2onnx(self.settings.gpu, modelSlot)
         return {
             "status": "ok",
             "path": f"/tmp/{output_file_simple}",
@@ -33,8 +33,6 @@ class DeviceManager(object):
             return False

         gpuName = torch.cuda.get_device_name(id).upper()

         # original: https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/config.py
         if (
             ("16" in gpuName and "V100" not in gpuName)
             or "P40" in gpuName.upper()
@@ -44,3 +42,11 @@ class DeviceManager(object):
             return False

         return True
+
+    def getDeviceMemory(self, id: int):
+        try:
+            return torch.cuda.get_device_properties(id).total_memory
+        # except Exception as e:
+        except:
+            # print(e)
+            return 0
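A minimal usage sketch of the new getDeviceMemory helper, assuming the DeviceManager.get_instance() singleton accessor that appears in the export2onnx hunk below; the pick_export_device wrapper is purely illustrative and not part of this commit:

# Illustrative only: getDeviceMemory() returns
# torch.cuda.get_device_properties(id).total_memory in bytes, or 0 when the
# lookup fails (no CUDA device, invalid id, etc.).
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager


def pick_export_device(gpu_id: int) -> str:
    # Hypothetical helper: treat "no reported memory" as "no usable GPU".
    memory = DeviceManager.get_instance().getDeviceMemory(gpu_id)
    return f"cuda:{gpu_id}" if memory > 0 else "cpu"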
@@ -5,6 +5,7 @@ from onnxsim import simplify
 import onnx
 from const import TMP_DIR, EnumInferenceTypes
 from voice_changer.RVC.ModelSlot import ModelSlot
+from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager

 from voice_changer.RVC.onnx.SynthesizerTrnMs256NSFsid_ONNX import (
     SynthesizerTrnMs256NSFsid_ONNX,
@@ -20,7 +21,7 @@ from voice_changer.RVC.onnx.SynthesizerTrnMsNSFsid_webui_ONNX import (
 )


-def export2onnx(modelSlot: ModelSlot):
+def export2onnx(gpu: int, modelSlot: ModelSlot):
     pyTorchModelFile = modelSlot.pyTorchModelFile

     output_file = os.path.splitext(os.path.basename(pyTorchModelFile))[0] + ".onnx"
@@ -39,8 +40,10 @@ def export2onnx(modelSlot: ModelSlot):
         "embChannels": modelSlot.embChannels,
         "embedder": modelSlot.embedder.value,
     }
+    gpuMomory = DeviceManager.get_instance().getDeviceMemory(gpu)
+    print(f"[Voice Changer] exporting onnx... gpu_id:{gpu} gpu_mem:{gpuMomory}")

-    if torch.cuda.device_count() > 0:
+    if gpuMomory > 0:
         _export2onnx(pyTorchModelFile, output_path, output_path_simple, True, metadata)
     else:
         print(
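Taken together, the commit threads the caller's configured GPU id down to DeviceManager and keys the CUDA export path on the memory reported for that device instead of torch.cuda.device_count(). A rough caller-side sketch under those assumptions; export_model itself is hypothetical, while settings.gpu, modelSlot and the returned dict follow the first hunk (class RVC) above:

# Illustrative wrapper (not the repository code): pass the configured GPU id
# into export2onnx(), which now consults DeviceManager for that device.
import voice_changer.RVC.export2onnx as onnxExporter


def export_model(settings, modelSlot) -> dict:
    output_file_simple = onnxExporter.export2onnx(settings.gpu, modelSlot)
    return {"status": "ok", "path": f"/tmp/{output_file_simple}"}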