separate fastapi

wataru 2022-12-31 18:06:57 +09:00
parent fa00613dbe
commit de1043aa8a
5 changed files with 110 additions and 26 deletions
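
In short, the commit pulls the REST endpoints out of the monolithic server file and into dedicated router classes (MMVC_Rest_Hello, MMVC_Rest_VoiceChanger) that the FastAPI app registers via include_router, while the Socket.IO wiring stays behind MMVC_SocketIOServer. A minimal sketch of that router-class pattern, with illustrative names (HelloRouter, app) that are not part of the commit:

# Sketch of the APIRouter pattern this commit introduces (names are illustrative).
from fastapi import APIRouter, FastAPI

class HelloRouter:
    def __init__(self):
        self.router = APIRouter()
        # Register the handler method as a GET endpoint on this router.
        self.router.add_api_route("/api/hello", self.hello, methods=["GET"])

    def hello(self):
        return {"result": "Index"}

app = FastAPI()
app.include_router(HelloRouter().router)  # same mechanism as app_fastapi.include_router(...) below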

View File

@@ -1,11 +1,11 @@
import sys, os, struct, argparse, shutil, base64, traceback
import sys, os, struct, argparse, shutil, traceback, base64, struct
import numpy as np
import misc.log_control
from dataclasses import dataclass
from datetime import datetime
from distutils.util import strtobool
import numpy as np
from scipy.io.wavfile import write, read
sys.path.append("MMVC_Trainer")
@@ -41,13 +41,10 @@ from mods.ssl import create_self_signed_cert
from voice_changer.VoiceChangerManager import VoiceChangerManager
from sio.MMVC_SocketIOServer import MMVC_SocketIOServer
@dataclass
class ExApplicationInfo():
external_tensorboard_port: int
exApplitionInfo = ExApplicationInfo(external_tensorboard_port=0)
from restapi.MMVC_Rest_VoiceChanger import MMVC_Rest_VoiceChanger
from restapi.MMVC_Rest_Hello import MMVC_Rest_Hello
from pydantic import BaseModel
class VoiceModel(BaseModel):
gpu: int
@@ -57,6 +54,16 @@ class VoiceModel(BaseModel):
prefixChunkSize: int
buffer: str
@dataclass
class ExApplicationInfo():
external_tensorboard_port: int
exApplitionInfo = ExApplicationInfo(external_tensorboard_port=0)
def setupArgParser():
parser = argparse.ArgumentParser()
parser.add_argument("-t", type=str, default="MMVC",
@@ -146,17 +153,16 @@ if __name__ == thisFilename or args.colab == True:
app_fastapi.mount(
"/recorder", StaticFiles(directory="../frontend/dist", html=True), name="static")
# sio = socketio.AsyncServer(
# async_mode='asgi',
# cors_allowed_origins='*'
# )
voiceChangerManager = VoiceChangerManager.get_instance()
# namespace = MMVC_Namespace.get_instance(voiceChangerManager)
# sio.register_namespace(namespace)
sio = MMVC_SocketIOServer.get_instance(voiceChangerManager)
voiceChangerManager = VoiceChangerManager.get_instance()
if CONFIG and MODEL:
voiceChangerManager.loadModel(CONFIG, MODEL)
# namespace.loadWhisperModel("base")
sio = MMVC_SocketIOServer.get_instance(voiceChangerManager)
restHello = MMVC_Rest_Hello()
app_fastapi.include_router(restHello.router)
restVoiceChanger = MMVC_Rest_VoiceChanger(voiceChangerManager)
app_fastapi.include_router(restVoiceChanger.router)
app_socketio = socketio.ASGIApp(
sio,
@@ -171,10 +177,6 @@ if __name__ == thisFilename or args.colab == True:
}
)
@app_fastapi.get("/api/hello")
async def index():
return {"result": "Index"}
############
# File Uploder
# ##########
@@ -233,8 +235,8 @@ if __name__ == thisFilename or args.colab == True:
# Voice Changer
# ##########
@app_fastapi.post("/test")
async def post_test(voice: VoiceModel):
@app_fastapi.post("/test2")
async def post_test2(voice: VoiceModel):
try:
# print("POST REQUEST PROCESSING....")
gpu = voice.gpu
@@ -342,7 +344,7 @@ if __name__ == thisFilename or args.colab == True:
return JSONResponse(content=json_compatible_item_data)
if __name__ == '__mp_main__':
printMessage(f"PHASE2adasdfadfasd:{__name__}", level=2)
printMessage(f"PHASE2:{__name__}", level=2)
if __name__ == '__main__':
printMessage(f"PHASE1:{__name__}", level=2)

View File

@@ -0,0 +1,13 @@
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
class MMVC_Rest_Hello:
def __init__(self):
self.router = APIRouter()
self.router.add_api_route("/api/hello", self.hello, methods=["GET"])
def hello(self):
return {"result": "Index"}

View File

@@ -0,0 +1,71 @@
import base64, struct
import numpy as np
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from voice_changer.VoiceChangerManager import VoiceChangerManager
from pydantic import BaseModel
import threading
class VoiceModel(BaseModel):
gpu: int
srcId: int
dstId: int
timestamp: int
prefixChunkSize: int
buffer: str
class MMVC_Rest_VoiceChanger:
def __init__(self, voiceChangerManager:VoiceChangerManager):
self.voiceChangerManager = voiceChangerManager
self.router = APIRouter()
self.router.add_api_route("/test", self.test, methods=["POST"])
self.tlock = threading.Lock()
def test(self, voice: VoiceModel):
try:
gpu = voice.gpu
srcId = voice.srcId
dstId = voice.dstId
timestamp = voice.timestamp
prefixChunkSize = voice.prefixChunkSize
buffer = voice.buffer
wav = base64.b64decode(buffer)
if wav == 0:
samplerate, data = read("dummy.wav")
unpackedData = data
else:
unpackedData = np.array(struct.unpack(
'<%sh' % (len(wav) // struct.calcsize('<h')), wav))
# write("logs/received_data.wav", 24000,
# unpackedData.astype(np.int16))
self.tlock.acquire()
changedVoice = self.voiceChangerManager.changeVoice(
gpu, srcId, dstId, timestamp, prefixChunkSize, unpackedData)
self.tlock.release()
changedVoiceBase64 = base64.b64encode(changedVoice).decode('utf-8')
data = {
"gpu": gpu,
"srcId": srcId,
"dstId": dstId,
"timestamp": timestamp,
"prefixChunkSize": prefixChunkSize,
"changedVoiceBase64": changedVoiceBase64
}
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(content=json_compatible_item_data)
except Exception as e:
print("REQUEST PROCESSING!!!! EXCEPTION!!!", e)
print(traceback.format_exc())
return str(e)
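
Note that, as rendered here, the new module calls read() in the dummy-wav branch and traceback.format_exc() in the exception handler without importing either. A sketch of the imports it would likely need at the top, mirroring those in the main server file (an observation about the diff as shown, not a change the commit itself makes):

# Likely-needed additions to the new module's imports (assumption based on
# the main server file's use of scipy.io.wavfile and traceback above).
import traceback
from scipy.io.wavfile import read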

View File

@@ -4,7 +4,6 @@ import numpy as np
import socketio
from voice_changer.VoiceChangerManager import VoiceChangerManager
class MMVC_Namespace(socketio.AsyncNamespace):
def __init__(self, namespace:str, voiceChangerManager:VoiceChangerManager):
super().__init__(namespace)

View File

@@ -46,7 +46,6 @@ class VoiceChanger():
def on_request(self, gpu, srcId, dstId, timestamp, prefixChunkSize, wav):
unpackedData = wav
convertSize = unpackedData.shape[0] + (prefixChunkSize * 512)
try:
audio = torch.FloatTensor(unpackedData.astype(np.float32))