Mirror of https://github.com/w-okada/voice-changer.git, synced 2025-01-23 21:45:00 +03:00
separate server device fin
This commit is contained in:
parent 8a8386640d
commit ca9bac0942
@@ -24,7 +24,8 @@ class MMVC_Namespace(socketio.AsyncNamespace):
     def __init__(self, namespace: str, voiceChangerManager: VoiceChangerManager):
         super().__init__(namespace)
         self.voiceChangerManager = voiceChangerManager
-        self.voiceChangerManager.voiceChanger.emitTo = self.emit_coroutine
+        # self.voiceChangerManager.voiceChanger.emitTo = self.emit_coroutine
+        self.voiceChangerManager.setEmitTo(self.emit_coroutine)
 
     @classmethod
     def get_instance(cls, voiceChangerManager: VoiceChangerManager):
@@ -34,11 +35,7 @@ class MMVC_Namespace(socketio.AsyncNamespace):
 
     def on_connect(self, sid, environ):
         self.sid = sid
-        print(
-            "[{}] connet sid : {}".format(
-                datetime.now().strftime("%Y-%m-%d %H:%M:%S"), sid
-            )
-        )
+        print("[{}] connet sid : {}".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), sid))
         pass
 
     async def on_request_message(self, sid, msg):
@@ -50,9 +47,7 @@ class MMVC_Namespace(socketio.AsyncNamespace):
             print(data)
             await self.emit("response", [timestamp, 0], to=sid)
         else:
-            unpackedData = np.array(
-                struct.unpack("<%sh" % (len(data) // struct.calcsize("<h")), data)
-            ).astype(np.int16)
+            unpackedData = np.array(struct.unpack("<%sh" % (len(data) // struct.calcsize("<h")), data)).astype(np.int16)
 
             res = self.voiceChangerManager.changeVoice(unpackedData)
             audio1 = res[0]
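For context, the unpack line above decodes the raw socket payload, little-endian 16-bit PCM bytes, into an int16 NumPy array. A minimal standalone sketch of that decoding step, using a made-up four-sample payload in place of the real socket.io data:

# Sketch of the PCM decoding used in the hunk above; the payload is fabricated.
import struct
import numpy as np

data = struct.pack("<4h", 0, 1000, -1000, 32767)  # fake 4-sample little-endian int16 payload

# Same form as the diff: struct.unpack the samples, then wrap them in an int16 array.
n = len(data) // struct.calcsize("<h")
unpacked = np.array(struct.unpack("<%sh" % n, data)).astype(np.int16)

# np.frombuffer yields the identical result without the intermediate tuple.
assert np.array_equal(unpacked, np.frombuffer(data, dtype="<i2"))
print(unpacked)  # [     0   1000  -1000  32767]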
@@ -107,20 +107,16 @@ class ServerDevice:
             sd._initialize()
 
             sd.default.device[0] = self.settings.serverInputDeviceId
-            # currentInputDeviceId = self.settings.serverInputDeviceId
             sd.default.device[1] = self.settings.serverOutputDeviceId
-            # currentOutputDeviceId = self.settings.serverOutputDeviceId
 
             serverInputAudioDevice = self.getServerInputAudioDevice(sd.default.device[0])
             serverOutputAudioDevice = self.getServerOutputAudioDevice(sd.default.device[1])
-            print(serverInputAudioDevice, serverOutputAudioDevice)
+            print("Devices:", serverInputAudioDevice, serverOutputAudioDevice)
             if serverInputAudioDevice is None or serverOutputAudioDevice is None:
                 time.sleep(2)
                 print("serverInputAudioDevice or serverOutputAudioDevice is None")
                 continue
 
-            # currentInputChannelNum = serverInputAudioDevice.maxInputChannels
-            # currentOutputChannelNum = serverOutputAudioDevice.maxOutputChannels
             sd.default.channels[0] = serverInputAudioDevice.maxInputChannels
             sd.default.channels[1] = serverOutputAudioDevice.maxOutputChannels
 
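A hedged sketch of the sounddevice pattern this hunk relies on: choose input/output device indices, set them as the library defaults, and size the default channels from each device's capabilities. It assumes the third-party sounddevice package and working audio hardware; the device indices are placeholders, not values from the project.

# Sketch of configuring sounddevice defaults; indices 1 and 3 are placeholders.
import sounddevice as sd

print(sd.query_devices())            # list every audio device with its index

input_id, output_id = 1, 3           # adjust to the devices on your machine
sd.default.device = (input_id, output_id)

in_info = sd.query_devices(input_id)
out_info = sd.query_devices(output_id)
# Mirror the hunk's channel setup: use each device's maximum channel count.
sd.default.channels = (in_info["max_input_channels"], out_info["max_output_channels"])
print(sd.default.device, sd.default.channels)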
@@ -164,10 +160,7 @@ class ServerDevice:
             ):
                 while self.settings.serverAudioStated == 1 and sd.default.device[0] == self.settings.serverInputDeviceId and sd.default.device[1] == self.settings.serverOutputDeviceId and currentModelSamplingRate == self.serverDeviceCallbacks.get_processing_sampling_rate() and currentInputChunkNum == self.settings.serverReadChunkSize:
                     time.sleep(2)
-                    print(
-                        "[Voice Changer] server audio",
-                        self.performance,
-                    )
+                    print("[Voice Changer] server audio", self.performance)
                     print(f"[Voice Changer] started:{self.settings.serverAudioStated}, input:{sd.default.device[0]}, output:{sd.default.device[1]}, mic_sr:{self.settings.serverInputAudioSampleRate}, model_sr:{currentModelSamplingRate}, chunk:{currentInputChunkNum}, ch:[{sd.default.channels}]")
 
         except Exception as e:
@@ -340,7 +340,6 @@ class RVC:
 
         audio = torchaudio.functional.resample(audio, self.settings.modelSamplingRate, 16000, rolloff=0.99)
         repeat = 1 if self.settings.rvcQuality else 0
-        print()
         sid = 0
         f0_up_key = self.settings.tran
         index_rate = self.settings.indexRatio
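The resample call retained above is the standard torchaudio way to bring audio from the model's sampling rate down to the 16 kHz expected downstream. A small self-contained sketch with a dummy waveform; the 48 kHz rate stands in for settings.modelSamplingRate and is not project data.

# Sketch of the resampling step; the waveform and input rate are placeholders.
import torch
import torchaudio

model_sr = 48000                     # placeholder for settings.modelSamplingRate
audio = torch.randn(model_sr)        # one second of dummy audio
audio16k = torchaudio.functional.resample(audio, model_sr, 16000, rolloff=0.99)
print(audio.shape, "->", audio16k.shape)  # torch.Size([48000]) -> torch.Size([16000])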
@@ -8,6 +8,8 @@ from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
 from dataclasses import dataclass, asdict
 import torch
 import threading
+from typing import Callable
+from typing import Any
 
 
 @dataclass()
@@ -34,7 +36,7 @@ class VoiceChangerManager(ServerDeviceCallbacks):
         return self.changeVoice(unpackedData)
 
     def emitTo(self, performance: list[float]):
-        print("emit ", performance)
+        self.emitToFunc(performance)
 
     def get_processing_sampling_rate(self):
         return self.voiceChanger.get_processing_sampling_rate()
@@ -139,3 +141,6 @@ class VoiceChangerManager(ServerDeviceCallbacks):
 
     def upload_model_assets(self, params: str):
         return self.voiceChanger.upload_model_assets(params)
+
+    def setEmitTo(self, emitTo: Callable[[Any], None]):
+        self.emitToFunc = emitTo
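Taken together with the MMVC_Namespace hunk at the top, these changes route performance reporting through the manager: the namespace registers its emit coroutine via setEmitTo, and emitTo later forwards data through whatever callable was injected. A stripped-down sketch of that wiring, using simplified stand-in classes rather than the project's real ones:

# Sketch of the callback injection introduced by this commit; class names are stand-ins.
from typing import Any, Callable


class Manager:
    """Stand-in for VoiceChangerManager: owns the injected emitter."""

    def setEmitTo(self, emitTo: Callable[[Any], None]):
        # Store the transport-level callback; the manager needs no socket.io knowledge.
        self.emitToFunc = emitTo

    def emitTo(self, performance: list[float]):
        # Forward performance figures through the injected callback.
        self.emitToFunc(performance)


class Namespace:
    """Stand-in for MMVC_Namespace: injects its emitter at construction."""

    def __init__(self, manager: Manager):
        self.manager = manager
        # Replaces the old direct assignment to manager.voiceChanger.emitTo.
        self.manager.setEmitTo(self.emit)

    def emit(self, payload):
        print("emit:", payload)


ns = Namespace(Manager())
ns.manager.emitTo([12.3, 4.5])  # prints: emit: [12.3, 4.5]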