diff --git a/server/Exceptions.py b/server/Exceptions.py
index 20e72113..9a2104b4 100644
--- a/server/Exceptions.py
+++ b/server/Exceptions.py
@@ -31,3 +31,8 @@ class ONNXInputArgumentException(Exception):
 class DeviceCannotSupportHalfPrecisionException(Exception):
     def __str__(self):
         return repr("Device cannot support half precision.")
+
+
+class VoiceChangerIsNotSelectedException(Exception):
+    def __str__(self):
+        return repr("Voice Changer is not selected.")
diff --git a/server/voice_changer/RVC/pipeline/Pipeline.py b/server/voice_changer/RVC/pipeline/Pipeline.py
index 7762d548..72a143c9 100644
--- a/server/voice_changer/RVC/pipeline/Pipeline.py
+++ b/server/voice_changer/RVC/pipeline/Pipeline.py
@@ -131,7 +131,7 @@ class Pipeline(object):
         padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
         try:
             feats = self.embedder.extractFeatures(feats, embOutputLayer, useFinalProj)
-            if all(i is None for i in feats):
+            if torch.isnan(feats).all():
                 raise DeviceCannotSupportHalfPrecisionException()
         except RuntimeError as e:
             if "HALF" in e.__str__().upper():
diff --git a/server/voice_changer/VoiceChanger.py b/server/voice_changer/VoiceChanger.py
index 1753268f..03032638 100755
--- a/server/voice_changer/VoiceChanger.py
+++ b/server/voice_changer/VoiceChanger.py
@@ -22,6 +22,7 @@ from Exceptions import (
     NoModeLoadedException,
     NotEnoughDataExtimateF0,
     ONNXInputArgumentException,
+    VoiceChangerIsNotSelectedException,
 )
 from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
 import threading
@@ -326,7 +327,9 @@ class VoiceChanger:
     def loadModel(self, props: LoadModelParams):
         try:
             if self.voiceChanger is None:
-                raise RuntimeError("Voice Changer is not selected.")
+                raise VoiceChangerIsNotSelectedException(
+                    "Voice Changer is not selected."
+                )
             return self.voiceChanger.loadModel(props)
         except Exception as e:
             print(traceback.format_exc())
@@ -432,7 +435,9 @@ class VoiceChanger:
     ) -> tuple[AudioInOut, list[Union[int, float]]]:
         try:
             if self.voiceChanger is None:
-                raise RuntimeError("Voice Changer is not selected.")
+                raise VoiceChangerIsNotSelectedException(
+                    "Voice Changer is not selected."
+                )
 
             processing_sampling_rate = self.voiceChanger.get_processing_sampling_rate()
             # 前処理
@@ -570,8 +575,11 @@ class VoiceChanger:
         except DeviceChangingException as e:
             print("[Voice Changer] embedder:", e)
             return np.zeros(1).astype(np.int16), [0, 0, 0]
+        except VoiceChangerIsNotSelectedException:
+            print("[Voice Changer] Voice Changer is not selected. please re-select vc.")
+            return np.zeros(1).astype(np.int16), [0, 0, 0]
         except Exception as e:
-            print("VC PROCESSING!!!! EXCEPTION!!!", e)
+            print("[Voice Changer] VC PROCESSING EXCEPTION!!!", e)
             print(traceback.format_exc())
             return np.zeros(1).astype(np.int16), [0, 0, 0]
 
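
Note (not part of the patch): a minimal sketch of why the half-precision check in Pipeline.py changed, assuming a device that cannot run fp16 produces a features tensor filled with NaNs rather than None values. The tensor below is simulated output, not the real embedder's.

    import torch

    # Simulated embedder output on a device without half-precision support
    # (assumption for illustration): a tensor where every element is NaN.
    feats = torch.full((1, 4, 8), float("nan"), dtype=torch.float16)

    # Old check: iterating a tensor yields sub-tensors, which are never None,
    # so this could not detect the failure.
    print(all(i is None for i in feats))    # False

    # New check: flags the failure when every element is NaN.
    print(torch.isnan(feats).all().item())  # True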