Mirror of https://github.com/w-okada/voice-changer.git (synced 2025-01-23 13:35:12 +03:00)

Commit ca9bac0942 (parent 8a8386640d): separate server device fin
@@ -24,7 +24,8 @@ class MMVC_Namespace(socketio.AsyncNamespace):
     def __init__(self, namespace: str, voiceChangerManager: VoiceChangerManager):
         super().__init__(namespace)
         self.voiceChangerManager = voiceChangerManager
-        self.voiceChangerManager.voiceChanger.emitTo = self.emit_coroutine
+        # self.voiceChangerManager.voiceChanger.emitTo = self.emit_coroutine
+        self.voiceChangerManager.setEmitTo(self.emit_coroutine)

     @classmethod
     def get_instance(cls, voiceChangerManager: VoiceChangerManager):
@@ -34,11 +35,7 @@ class MMVC_Namespace(socketio.AsyncNamespace):

     def on_connect(self, sid, environ):
         self.sid = sid
-        print(
-            "[{}] connet sid : {}".format(
-                datetime.now().strftime("%Y-%m-%d %H:%M:%S"), sid
-            )
-        )
+        print("[{}] connet sid : {}".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), sid))
         pass

     async def on_request_message(self, sid, msg):
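For orientation, the hunks above and below live in a python-socketio class-based namespace: handlers are methods named on_<event>, and emit(..., to=sid) answers only the requesting client. A minimal sketch of that shape, assuming an ASGI deployment; the "/test" namespace name is purely illustrative:

import socketio


class SketchNamespace(socketio.AsyncNamespace):
    def on_connect(self, sid, environ):
        # Synchronous handlers are allowed; coroutine handlers are awaited by the server.
        print("connected:", sid)

    async def on_request_message(self, sid, msg):
        # Reply only to the requesting client, as the real handler does with to=sid.
        await self.emit("response", ["ok"], to=sid)


sio = socketio.AsyncServer(async_mode="asgi")
sio.register_namespace(SketchNamespace("/test"))
app = socketio.ASGIApp(sio)  # serve with e.g. uvicorn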
@@ -50,9 +47,7 @@ class MMVC_Namespace(socketio.AsyncNamespace):
             print(data)
             await self.emit("response", [timestamp, 0], to=sid)
         else:
-            unpackedData = np.array(
-                struct.unpack("<%sh" % (len(data) // struct.calcsize("<h")), data)
-            ).astype(np.int16)
+            unpackedData = np.array(struct.unpack("<%sh" % (len(data) // struct.calcsize("<h")), data)).astype(np.int16)

             res = self.voiceChangerManager.changeVoice(unpackedData)
             audio1 = res[0]
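The collapsed line above decodes the raw payload as little-endian 16-bit PCM: struct.calcsize("<h") is 2, so len(data) // 2 is the sample count and the format string becomes e.g. "<4h". A minimal sketch with made-up data, showing np.frombuffer as an equivalent, shorter decode:

import struct

import numpy as np

# Pretend payload: 4 samples of little-endian 16-bit PCM.
data = struct.pack("<4h", 0, 1000, -1000, 32767)

# struct-based decode, as in the diff: "<%sh" builds a format like "<4h".
unpackedData = np.array(struct.unpack("<%sh" % (len(data) // struct.calcsize("<h")), data)).astype(np.int16)

# Equivalent decode without the intermediate Python tuple.
viaFrombuffer = np.frombuffer(data, dtype="<i2").astype(np.int16)

assert np.array_equal(unpackedData, viaFrombuffer)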
@@ -107,20 +107,16 @@ class ServerDevice:
             sd._initialize()

             sd.default.device[0] = self.settings.serverInputDeviceId
-            # currentInputDeviceId = self.settings.serverInputDeviceId
             sd.default.device[1] = self.settings.serverOutputDeviceId
-            # currentOutputDeviceId = self.settings.serverOutputDeviceId

             serverInputAudioDevice = self.getServerInputAudioDevice(sd.default.device[0])
             serverOutputAudioDevice = self.getServerOutputAudioDevice(sd.default.device[1])
-            print(serverInputAudioDevice, serverOutputAudioDevice)
+            print("Devices:", serverInputAudioDevice, serverOutputAudioDevice)
             if serverInputAudioDevice is None or serverOutputAudioDevice is None:
                 time.sleep(2)
                 print("serverInputAudioDevice or serverOutputAudioDevice is None")
                 continue

-            # currentInputChannelNum = serverInputAudioDevice.maxInputChannels
-            # currentOutputChannelNum = serverOutputAudioDevice.maxOutputChannels
             sd.default.channels[0] = serverInputAudioDevice.maxInputChannels
             sd.default.channels[1] = serverOutputAudioDevice.maxOutputChannels

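For context on the sd.default assignments: sounddevice exposes process-wide defaults as (input, output) pairs, so index 0 is the input side and index 1 the output side, and channel counts can be pinned to what the chosen devices actually support. A minimal sketch, assuming sounddevice is installed and simply reusing the current system defaults:

import sounddevice as sd

inputDeviceId = sd.default.device[0]    # index 0 = input side
outputDeviceId = sd.default.device[1]   # index 1 = output side

inputInfo = sd.query_devices(inputDeviceId)
outputInfo = sd.query_devices(outputDeviceId)
print("Devices:", inputInfo["name"], "->", outputInfo["name"])

# Mirror the diff: take channel counts from the devices themselves.
sd.default.channels[0] = inputInfo["max_input_channels"]
sd.default.channels[1] = outputInfo["max_output_channels"]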
@@ -164,10 +160,7 @@ class ServerDevice:
            ):
                while self.settings.serverAudioStated == 1 and sd.default.device[0] == self.settings.serverInputDeviceId and sd.default.device[1] == self.settings.serverOutputDeviceId and currentModelSamplingRate == self.serverDeviceCallbacks.get_processing_sampling_rate() and currentInputChunkNum == self.settings.serverReadChunkSize:
                    time.sleep(2)
-                    print(
-                        "[Voice Changer] server audio",
-                        self.performance,
-                    )
+                    print("[Voice Changer] server audio", self.performance)
                    print(f"[Voice Changer] started:{self.settings.serverAudioStated}, input:{sd.default.device[0]}, output:{sd.default.device[1]}, mic_sr:{self.settings.serverInputAudioSampleRate}, model_sr:{currentModelSamplingRate}, chunk:{currentInputChunkNum}, ch:[{sd.default.channels}]")

        except Exception as e:
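The long while condition above acts as a change detector: the inner loop keeps sleeping and reporting only while the started flag, both device IDs, the model sampling rate, and the chunk size still match what the stream was opened with; any change breaks out so the outer code can rebuild the stream. A compact sketch of that watch-and-rebuild pattern (Settings and its field names are simplified stand-ins, not the project's classes):

import time
from dataclasses import dataclass


@dataclass
class Settings:
    started: int = 1
    inputDeviceId: int = 0
    outputDeviceId: int = 2
    readChunkSize: int = 192


def serve(settings: Settings):
    while settings.started == 1:
        opened_with = (settings.inputDeviceId, settings.outputDeviceId, settings.readChunkSize)
        print("[sketch] opening stream with", opened_with)
        while settings.started == 1 and opened_with == (settings.inputDeviceId, settings.outputDeviceId, settings.readChunkSize):
            time.sleep(2)  # the stream would be running here; just report status periodically
        print("[sketch] settings changed, rebuilding stream")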
@@ -340,7 +340,6 @@ class RVC:

        audio = torchaudio.functional.resample(audio, self.settings.modelSamplingRate, 16000, rolloff=0.99)
        repeat = 1 if self.settings.rvcQuality else 0
-        print()
        sid = 0
        f0_up_key = self.settings.tran
        index_rate = self.settings.indexRatio
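The resample call above converts model-rate audio down to 16 kHz for the downstream feature extraction; rolloff=0.99 keeps the anti-aliasing filter close to Nyquist. A minimal sketch, assuming torch and torchaudio are installed; the 48000 input rate is an example value standing in for settings.modelSamplingRate:

import torch
import torchaudio

modelSamplingRate = 48000  # example value
audio = torch.randn(modelSamplingRate)  # one second of fake audio at the model rate

audio16k = torchaudio.functional.resample(audio, modelSamplingRate, 16000, rolloff=0.99)
print(audio16k.shape)  # torch.Size([16000])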
@@ -8,6 +8,8 @@ from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
 from dataclasses import dataclass, asdict
 import torch
 import threading
+from typing import Callable
+from typing import Any


 @dataclass()
@@ -34,7 +36,7 @@ class VoiceChangerManager(ServerDeviceCallbacks):
         return self.changeVoice(unpackedData)

     def emitTo(self, performance: list[float]):
-        print("emit ", performance)
+        self.emitToFunc(performance)

     def get_processing_sampling_rate(self):
         return self.voiceChanger.get_processing_sampling_rate()
@@ -139,3 +141,6 @@ class VoiceChangerManager(ServerDeviceCallbacks):

     def upload_model_assets(self, params: str):
         return self.voiceChanger.upload_model_assets(params)
+
+    def setEmitTo(self, emitTo: Callable[[Any], None]):
+        self.emitToFunc = emitTo
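Putting the VoiceChangerManager hunks together: the typing imports give the stored callback an explicit Callable[[Any], None] type, setEmitTo stores it, and emitTo simply forwards to it. A self-contained sketch of that wiring, with a plain function standing in for the namespace's emit coroutine and a no-op default that the real code may not have:

from typing import Any, Callable


class ManagerSketch:
    def __init__(self):
        # Assumed safe default so emitTo never fails before setEmitTo is called.
        self.emitToFunc: Callable[[Any], None] = lambda _performance: None

    def setEmitTo(self, emitTo: Callable[[Any], None]):
        self.emitToFunc = emitTo

    def emitTo(self, performance: list[float]):
        self.emitToFunc(performance)


manager = ManagerSketch()
manager.setEmitTo(lambda performance: print("emit", performance))
manager.emitTo([12.3, 4.5])  # -> emit [12.3, 4.5]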