Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 21:45:00 +03:00)

commit a8a392b20d
parent 7ab7dba5c8

bugfix: mps for rmvpe

@@ -18,10 +18,11 @@ class DeviceManager(object):
         )
 
     def getDevice(self, id: int):
-        if id < 0 or (self.gpu_num == 0 and self.mps_enabled is False):
-            dev = torch.device("cpu")
-        elif self.mps_enabled:
-            dev = torch.device("mps")
+        if id < 0 or self.gpu_num == 0:
+            if self.mps_enabled is False:
+                dev = torch.device("cpu")
+            else:
+                dev = torch.device("mps")
         else:
             dev = torch.device("cuda", index=id)
         return dev

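This is the heart of the fix: a negative device id (or a machine with no CUDA GPUs) now falls through to MPS when it is enabled, instead of unconditionally landing on CPU. Downstream, the CREPE and RMVPE extractors in both the RVC and DiffusionSVC trees stop probing torch.cuda themselves and ask DeviceManager instead, as the later hunks show. A minimal standalone sketch of the new selection order (the helper name and explicit arguments are illustrative; in the class they come from self.gpu_num and self.mps_enabled):

    import torch

    def pick_device(id: int, gpu_num: int, mps_enabled: bool) -> torch.device:
        # Mirrors the updated getDevice(): MPS is considered whenever no CUDA
        # device is requested or available, before falling back to CPU.
        if id < 0 or gpu_num == 0:
            if mps_enabled is False:
                return torch.device("cpu")
            return torch.device("mps")
        return torch.device("cuda", index=id)

    print(pick_device(-1, gpu_num=0, mps_enabled=True))   # mps    (Apple Silicon; previously cpu)
    print(pick_device(-1, gpu_num=2, mps_enabled=False))  # cpu    (unchanged)
    print(pick_device(0, gpu_num=2, mps_enabled=False))   # cuda:0 (unchanged)
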
@@ -51,6 +52,6 @@ class DeviceManager(object):
         try:
             return torch.cuda.get_device_properties(id).total_memory
         # except Exception as e:
-        except:
+        except: # NOQA
             # print(e)
             return 0

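The VRAM query keeps its bare except, now only tagged # NOQA to quiet the linter: on CPU or MPS there is no CUDA device to inspect, so the call raises and the method reports 0. A small sketch of that pattern in isolation (the function name is made up for the example):

    import torch

    def total_device_memory(id: int) -> int:
        # Total VRAM in bytes for CUDA device `id`, or 0 when the id does not
        # name a usable CUDA device (e.g. running on CPU or Apple MPS).
        try:
            return torch.cuda.get_device_properties(id).total_memory
        except:  # NOQA
            return 0

    print(total_device_memory(0))  # bytes on GPU 0, or 0 without CUDA
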
@@ -3,21 +3,19 @@ import torch
 import numpy as np
 from const import PitchExtractorType
 from voice_changer.DiffusionSVC.pitchExtractor.PitchExtractor import PitchExtractor
+from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
 from voice_changer.utils.VoiceChangerModel import AudioInOut
 
 
 class CrepePitchExtractor(PitchExtractor):
 
-    def __init__(self):
+    def __init__(self, gpu: int):
         super().__init__()
         self.pitchExtractorType: PitchExtractorType = "crepe"
         self.f0_min = 50
         self.f0_max = 1100
         self.uv_interp = True
-        if torch.cuda.is_available():
-            self.device = torch.device("cuda:" + str(torch.cuda.current_device()))
-        else:
-            self.device = torch.device("cpu")
+        self.device = DeviceManager.get_instance().getDevice(gpu)
 
     def extract(self, audio: AudioInOut, sr: int, block_size: int, model_sr: int, pitch, f0_up_key, silence_front=0):
         hop_size = block_size * sr / model_sr

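With the constructor now taking a gpu id, the extractor no longer probes torch.cuda itself; device selection is delegated to the shared DeviceManager. A hedged usage sketch (the module path for CrepePitchExtractor is assumed from the imports above):

    from voice_changer.DiffusionSVC.pitchExtractor.CrepePitchExtractor import CrepePitchExtractor

    # gpu follows DeviceManager.getDevice() semantics: a negative id resolves to
    # MPS on Apple Silicon and to CPU elsewhere; a non-negative id selects CUDA.
    extractor = CrepePitchExtractor(gpu=-1)
    print(extractor.device)  # e.g. device(type='mps') on an M-series Mac
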
@@ -33,7 +33,7 @@ class PitchExtractorManager(Protocol):
         elif pitchExtractorType == "dio":
             return DioPitchExtractor()
         elif pitchExtractorType == "crepe":
-            return CrepePitchExtractor()
+            return CrepePitchExtractor(gpu)
         elif pitchExtractorType == "crepe_tiny":
             return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_tiny, gpu)
         elif pitchExtractorType == "crepe_full":

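The factory simply threads the gpu id through to the new constructor. A reduced sketch of the dispatch (a standalone function instead of the real classmethod, only the branches visible in this hunk, and assumed import paths):

    from voice_changer.DiffusionSVC.pitchExtractor.CrepePitchExtractor import CrepePitchExtractor
    from voice_changer.DiffusionSVC.pitchExtractor.DioPitchExtractor import DioPitchExtractor

    def create_pitch_extractor(pitchExtractorType: str, gpu: int):
        # Stand-in for the manager's factory method shown above.
        if pitchExtractorType == "dio":
            return DioPitchExtractor()
        elif pitchExtractorType == "crepe":
            return CrepePitchExtractor(gpu)  # gpu is now forwarded to the constructor
        raise ValueError(f"unsupported pitchExtractorType: {pitchExtractorType}")
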
@@ -5,6 +5,7 @@ from const import PitchExtractorType
 from voice_changer.DiffusionSVC.pitchExtractor.PitchExtractor import PitchExtractor
 from voice_changer.DiffusionSVC.pitchExtractor.rmvpe.rmvpe import RMVPE
 from scipy.ndimage import zoom
+from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
 
 from voice_changer.utils.VoiceChangerModel import AudioInOut
 

@@ -18,10 +19,7 @@ class RMVPEPitchExtractor(PitchExtractor):
         self.f0_max = 1100
         self.uv_interp = True
         self.input_sr = -1
-        if torch.cuda.is_available() and gpu >= 0:
-            self.device = torch.device("cuda:" + str(torch.cuda.current_device()))
-        else:
-            self.device = torch.device("cpu")
+        self.device = DeviceManager.get_instance().getDevice(gpu)
 
         self.rmvpe = RMVPE(model_path=file, is_half=False, device=self.device)
 

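Both DiffusionSVC pitch extractors now obtain their device the same way, and for RMVPE that device is handed straight to the model wrapper. A hedged sketch of the flow (the model path is a placeholder, and the fp16 remark is an assumption rather than something stated in the commit):

    from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
    from voice_changer.DiffusionSVC.pitchExtractor.rmvpe.rmvpe import RMVPE

    device = DeviceManager.get_instance().getDevice(-1)  # mps on Apple Silicon, else cpu/cuda
    # is_half=False keeps the model in fp32 on every backend, which sidesteps
    # patchy fp16 support on MPS (assumption; the flag predates this commit).
    rmvpe = RMVPE(model_path="rmvpe.pt", is_half=False, device=device)
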
@@ -20,10 +20,11 @@ class DeviceManager(object):
         )
 
     def getDevice(self, id: int):
-        if id < 0 or (self.gpu_num == 0 and self.mps_enabled is False):
-            dev = torch.device("cpu")
-        elif self.mps_enabled:
-            dev = torch.device("mps")
+        if id < 0 or self.gpu_num == 0:
+            if self.mps_enabled is False:
+                dev = torch.device("cpu")
+            else:
+                dev = torch.device("mps")
         else:
             if id < self.gpu_num:
                 dev = torch.device("cuda", index=id)

@@ -46,7 +47,7 @@ class DeviceManager(object):
                     "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                     "inter_op_num_threads": 8,
                 }
             ]
         elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
             return ["DmlExecutionProvider"], [{"device_id": gpu}]
         else:

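The ONNX path is untouched by this commit (the hunk above is pure context, shifted by one line): for CUDA and DirectML the provider list and its options are chosen from the requested gpu id. A sketch of that selection structured like the code above (the CPU fallback and the exact CUDA option keys outside this hunk are assumptions):

    import onnxruntime

    def pick_onnx_providers(gpu: int):
        available = onnxruntime.get_available_providers()
        if gpu >= 0 and "CUDAExecutionProvider" in available:
            return ["CUDAExecutionProvider"], [
                {
                    "device_id": gpu,
                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                    "inter_op_num_threads": 8,
                }
            ]
        elif gpu >= 0 and "DmlExecutionProvider" in available:
            return ["DmlExecutionProvider"], [{"device_id": gpu}]
        # Fallback when no GPU provider applies.
        return ["CPUExecutionProvider"], [{}]

    providers, options = pick_onnx_providers(-1)
    print(providers)  # ['CPUExecutionProvider'] because gpu < 0
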
@@ -1,23 +1,19 @@
 import torchcrepe
-import torch
 import numpy as np
 from const import PitchExtractorType
+from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
 
 from voice_changer.RVC.pitchExtractor.PitchExtractor import PitchExtractor
 
 
 class CrepePitchExtractor(PitchExtractor):
 
-    def __init__(self):
+    def __init__(self, gpu: int):
         super().__init__()
         self.pitchExtractorType: PitchExtractorType = "crepe"
-        if torch.cuda.is_available():
-            self.device = torch.device("cuda:" + str(torch.cuda.current_device()))
-        else:
-            self.device = torch.device("cpu")
+        self.device = DeviceManager.get_instance().getDevice(gpu)
 
     def extract(self, audio, pitchf, f0_up_key, sr, window, silence_front=0):
-        n_frames = int(len(audio) // window) + 1
         start_frame = int(silence_front * sr / window)
         real_silence_front = start_frame * window / sr
 

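In the RVC CREPE extractor the same DeviceManager device ultimately gets passed to torchcrepe. A rough, hedged sketch of running CREPE on whichever device is selected (hop length, batch size and the dummy input are illustrative, not values from the extractor):

    import numpy as np
    import torch
    import torchcrepe

    from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager

    device = DeviceManager.get_instance().getDevice(-1)
    audio = torch.from_numpy(np.zeros(16000, dtype=np.float32))[None, :]  # 1 s of silence, shape (1, T)
    f0 = torchcrepe.predict(
        audio,
        16000,
        hop_length=160,
        fmin=50,
        fmax=1100,
        model="full",
        batch_size=256,
        device=device,
    )
    print(f0.shape)  # one pitch value per hop
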
@@ -33,7 +33,7 @@ class PitchExtractorManager(Protocol):
         elif pitchExtractorType == "dio":
             return DioPitchExtractor()
         elif pitchExtractorType == "crepe":
-            return CrepePitchExtractor()
+            return CrepePitchExtractor(gpu)
         elif pitchExtractorType == "crepe_tiny":
             return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_tiny, gpu)
         elif pitchExtractorType == "crepe_full":

@@ -1,8 +1,8 @@
-import torch
 import numpy as np
 from const import PitchExtractorType
 from voice_changer.DiffusionSVC.pitchExtractor.PitchExtractor import PitchExtractor
 from voice_changer.DiffusionSVC.pitchExtractor.rmvpe.rmvpe import RMVPE
+from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
 
 
 class RMVPEPitchExtractor(PitchExtractor):

@@ -17,11 +17,7 @@ class RMVPEPitchExtractor(PitchExtractor):
 
         self.uv_interp = True
         self.input_sr = -1
-        if torch.cuda.is_available() and gpu >= 0:
-            self.device = torch.device("cuda:" + str(torch.cuda.current_device()))
-        else:
-            self.device = torch.device("cpu")
-
+        self.device = DeviceManager.get_instance().getDevice(gpu)
         self.rmvpe = RMVPE(model_path=file, is_half=False, device=self.device)
 
     def extract(self, audio, pitchf, f0_up_key, sr, window, silence_front=0):

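All of the above only helps if DeviceManager's mps_enabled flag is actually set on Apple hardware; a minimal check of the kind that flag presumably wraps (an assumption — how the flag is populated is outside this diff):

    import torch

    # getattr guards against torch builds that predate the MPS backend.
    mps_enabled = getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available()
    print("MPS available:", mps_enabled)

    if mps_enabled:
        x = torch.ones(4, device="mps")  # tensors can now live on the Apple GPU
        print(x.device)                  # mps:0
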