WIP: improve model selector

This commit is contained in:
wataru 2023-05-09 01:16:13 +09:00
parent 751234e65b
commit 426388872a
7 changed files with 6 additions and 153 deletions

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,3 @@
-import { Framework } from "@dannadori/voice-changer-client-js"
 import React, { useMemo } from "react"
 import { useAppState } from "../../../001_provider/001_AppStateProvider"

View File

@@ -123,7 +123,7 @@ export class ServerConfigurator {
         })
     }
-    loadModel = async (slot: number, configFilename: string, pyTorchModelFilename: string | null, onnxModelFilename: string | null, clusterTorchModelFilename: string | null, featureFilename: string | null, indexFilename: string | null, isHalf: boolean, params: string = "{}") => {
+    loadModel = async (slot: number, isHalf: boolean, params: string = "{}") => {
         if (isHalf == undefined || isHalf == null) {
             console.warn("isHalf is invalid value", isHalf)
             isHalf = false
@@ -131,14 +131,7 @@ export class ServerConfigurator {
         const url = this.serverUrl + "/load_model"
         const info = new Promise<ServerInfo>(async (resolve) => {
             const formData = new FormData();
-            // formData.append("slot", "" + slot);
             formData.append("slot", "" + slot);
-            formData.append("pyTorchModelFilename", pyTorchModelFilename || "-");
-            formData.append("onnxModelFilename", onnxModelFilename || "-");
-            formData.append("configFilename", configFilename);
-            formData.append("clusterTorchModelFilename", clusterTorchModelFilename || "-");
-            formData.append("featureFilename", featureFilename || "-");
-            formData.append("indexFilename", indexFilename || "-");
             formData.append("isHalf", "" + isHalf);
             formData.append("params", params);

View File

@@ -288,16 +288,10 @@ export class VoiceChangerClient {
     }
     loadModel = (
         slot: number,
-        configFilename: string,
-        pyTorchModelFilename: string | null,
-        onnxModelFilename: string | null,
-        clusterTorchModelFilename: string | null,
-        featureFilename: string | null,
-        indexFilename: string | null,
         isHalf: boolean,
         params: string,
     ) => {
-        return this.configurator.loadModel(slot, configFilename, pyTorchModelFilename, onnxModelFilename, clusterTorchModelFilename, featureFilename, indexFilename, isHalf, params)
+        return this.configurator.loadModel(slot, isHalf, params)
     }
     //## Worklet ##//
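
For orientation, the client-facing call now takes only three arguments; everything that used to be a separate filename argument is expected to travel inside the JSON params payload. A minimal usage sketch, assuming a VoiceChangerClient instance named client; the key names inside files are illustrative assumptions, since the actual keys are not visible in this diff:

    // Hypothetical usage sketch (not part of this commit).
    // File names are sent inside the JSON "params" string rather than as separate arguments.
    const params = JSON.stringify({
        trans: 0, // default tune, as assembled in useServerSetting
        files: {
            // Key names below are assumptions for illustration only.
            pyTorchModelFilename: "model.pth",
            indexFilename: "model.index",
        },
    })
    await client.loadModel(0 /* slot */, false /* isHalf */, params)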

View File

@@ -11,14 +11,6 @@ type ModelData = {
 }
 export type FileUploadSetting = {
-    pyTorchModel: ModelData | null
-    onnxModel: ModelData | null
-    configFile: ModelData | null
-    clusterTorchModel: ModelData | null
-    feature: ModelData | null //RVC
-    index: ModelData | null //RVC
     isHalf: boolean
     uploaded: boolean
     defaultTune: number
@@ -47,14 +39,6 @@ export type FileUploadSetting = {
 }
 const InitialFileUploadSetting: FileUploadSetting = {
-    pyTorchModel: null,
-    configFile: null,
-    onnxModel: null,
-    clusterTorchModel: null,
-    feature: null,
-    index: null,
     isHalf: true,
     uploaded: false,
     defaultTune: 0,
@@ -308,63 +292,7 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
         setUploadProgress(0)
         setIsUploading(true)
-        // Load the files into memory (if data is already set, assume it was read from the cache and skip)
         const fileUploadSetting = fileUploadSettings[slot]
-        if (fileUploadSetting.onnxModel && !fileUploadSetting.onnxModel.data) {
-            fileUploadSetting.onnxModel.data = await fileUploadSetting.onnxModel.file!.arrayBuffer()
-            fileUploadSetting.onnxModel.filename = await fileUploadSetting.onnxModel.file!.name
-        }
-        if (fileUploadSetting.pyTorchModel && !fileUploadSetting.pyTorchModel.data) {
-            fileUploadSetting.pyTorchModel.data = await fileUploadSetting.pyTorchModel.file!.arrayBuffer()
-            fileUploadSetting.pyTorchModel.filename = await fileUploadSetting.pyTorchModel.file!.name
-        }
-        if (fileUploadSetting.configFile && !fileUploadSetting.configFile.data) {
-            fileUploadSetting.configFile.data = await fileUploadSetting.configFile.file!.arrayBuffer()
-            fileUploadSetting.configFile.filename = await fileUploadSetting.configFile.file!.name
-        }
-        if (fileUploadSetting.clusterTorchModel) {
-            if ((props.clientType == "so-vits-svc-40v2" || props.clientType == "so-vits-svc-40") && !fileUploadSetting.clusterTorchModel!.data) {
-                fileUploadSetting.clusterTorchModel!.data = await fileUploadSetting.clusterTorchModel!.file!.arrayBuffer()
-                fileUploadSetting.clusterTorchModel!.filename = await fileUploadSetting.clusterTorchModel!.file!.name
-            }
-        }
-        if (fileUploadSetting.feature) {
-            if ((props.clientType == "RVC") && !fileUploadSetting.feature!.data) {
-                fileUploadSetting.feature!.data = await fileUploadSetting.feature!.file!.arrayBuffer()
-                fileUploadSetting.feature!.filename = await fileUploadSetting.feature!.file!.name
-            }
-        }
-        if (fileUploadSetting.index) {
-            if ((props.clientType == "RVC") && !fileUploadSetting.index!.data) {
-                fileUploadSetting.index!.data = await fileUploadSetting.index!.file!.arrayBuffer()
-                fileUploadSetting.index!.filename = await fileUploadSetting.index!.file!.name
-            }
-        }
-        // Upload the files to the server
-        const models = [
-            fileUploadSetting.onnxModel,
-            fileUploadSetting.pyTorchModel,
-            fileUploadSetting.clusterTorchModel,
-            fileUploadSetting.feature,
-            fileUploadSetting.index,
-        ].filter(x => { return x != null }) as ModelData[]
-        for (let i = 0; i < models.length; i++) {
-            const progRate = 1 / models.length
-            const progOffset = 100 * i * progRate
-            await _uploadFile(models[i], (progress: number, _end: boolean) => {
-                // console.log(progress * progRate + progOffset, end, progRate,)
-                setUploadProgress(progress * progRate + progOffset)
-            })
-        }
-        if (fileUploadSetting.configFile) {
-            await _uploadFile(fileUploadSetting.configFile, (progress: number, end: boolean) => {
-                console.log(progress, end)
-            })
-        }
         // MMVCv13
         const normalModels = [
@@ -414,7 +342,7 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
             }, dir)
         }
-        const configFileName = fileUploadSetting.configFile?.filename || "-"
+        // const configFileName = fileUploadSetting.configFile?.filename || "-"
         const params = JSON.stringify({
             trans: fileUploadSetting.defaultTune || 0,
             files: {
@@ -443,21 +371,8 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
         fileUploadSetting.isHalf = false
         }
-        const pyTorchModel = fileUploadSetting.pyTorchModel?.filename || null
-        const onnxModel = fileUploadSetting.onnxModel?.filename || null
-        const clusterTorchModel = fileUploadSetting.clusterTorchModel?.filename || null
-        const feature = fileUploadSetting.feature?.filename || null
-        const index = fileUploadSetting.index?.filename || null
         const loadPromise = props.voiceChangerClient.loadModel(
             slot,
-            configFileName,
-            pyTorchModel,
-            onnxModel,
-            clusterTorchModel,
-            feature,
-            index,
             fileUploadSetting.isHalf,
             params,
         )
@@ -478,27 +393,9 @@ export const useServerSetting = (props: UseServerSettingProps): ServerSettingSta
     }, [fileUploadSettings, props.voiceChangerClient, props.clientType])
-    // const updateDefaultTune = (slot: number, tune: number) => {
-    // fileUploadSettings[slot].defaultTune = tune
-    // storeToCache(slot, fileUploadSettings[slot])
-    // setFileUploadSettings([...fileUploadSettings])
-    // }
     const storeToCache = (slot: number, fileUploadSetting: FileUploadSetting) => {
         try {
             const saveData: FileUploadSetting = {
-                pyTorchModel: fileUploadSetting.pyTorchModel ? { data: fileUploadSetting.pyTorchModel.data, filename: fileUploadSetting.pyTorchModel.filename } : null,
-                onnxModel: fileUploadSetting.onnxModel ? { data: fileUploadSetting.onnxModel.data, filename: fileUploadSetting.onnxModel.filename } : null,
-                configFile: fileUploadSetting.configFile ? { data: fileUploadSetting.configFile.data, filename: fileUploadSetting.configFile.filename } : null,
-                clusterTorchModel: fileUploadSetting.clusterTorchModel ? {
-                    data: fileUploadSetting.clusterTorchModel.data, filename: fileUploadSetting.clusterTorchModel.filename
-                } : null,
-                feature: fileUploadSetting.feature ? {
-                    data: fileUploadSetting.feature.data, filename: fileUploadSetting.feature.filename
-                } : null,
-                index: fileUploadSetting.index ? {
-                    data: fileUploadSetting.index.data, filename: fileUploadSetting.index.filename
-                } : null,
                 isHalf: fileUploadSetting.isHalf, // Not used from the cache; overwritten by the GUI.
                 uploaded: false, // When read from the cache, nothing has been uploaded yet.
                 defaultTune: fileUploadSetting.defaultTune,

View File

@@ -11,9 +11,8 @@ from restapi.mods.FileUploader import upload_file, concat_file_chunks
 from voice_changer.VoiceChangerManager import VoiceChangerManager
 from const import MODEL_DIR, UPLOAD_DIR, ModelType
-from voice_changer.utils.LoadModelParams import FilePaths, LoadModelParams
+from voice_changer.utils.LoadModelParams import LoadModelParams
-from dataclasses import fields
 os.makedirs(UPLOAD_DIR, exist_ok=True)
 os.makedirs(MODEL_DIR, exist_ok=True)
@@ -75,41 +74,13 @@ class MMVC_Rest_Fileuploader:
     def post_load_model(
         self,
         slot: int = Form(...),
-        pyTorchModelFilename: str = Form(...),
-        onnxModelFilename: str = Form(...),
-        configFilename: str = Form(...),
-        clusterTorchModelFilename: str = Form(...),
-        featureFilename: str = Form(...),
-        indexFilename: str = Form(...),
         isHalf: bool = Form(...),
         params: str = Form(...),
     ):
-        files = FilePaths(
-            configFilename=configFilename,
-            pyTorchModelFilename=pyTorchModelFilename,
-            onnxModelFilename=onnxModelFilename,
-            clusterTorchModelFilename=clusterTorchModelFilename,
-            featureFilename=featureFilename,
-            indexFilename=indexFilename,
-        )
         paramDict = json.loads(params)
         print("paramDict", paramDict)
         # Change Filepath
-        for field in fields(files):
-            key = field.name
-            val = getattr(files, key)
-            if val != "-":
-                uploadPath = os.path.join(UPLOAD_DIR, val)
-                storePath = os.path.join(UPLOAD_DIR, f"{slot}", val)
-                storeDir = os.path.dirname(storePath)
-                os.makedirs(storeDir, exist_ok=True)
-                shutil.move(uploadPath, storePath)
-                setattr(files, key, storePath)
-            else:
-                setattr(files, key, None)
         newFilesDict = {}
         for key, val in paramDict["files"].items():
             if val != "-" and val != "":
@@ -122,7 +93,7 @@ class MMVC_Rest_Fileuploader:
         paramDict["files"] = newFilesDict
         props: LoadModelParams = LoadModelParams(
-            slot=slot, isHalf=isHalf, params=paramDict, files=files
+            slot=slot, isHalf=isHalf, params=paramDict
         )
         info = self.voiceChangerManager.loadModel(props)
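
On the wire, the simplified /load_model endpoint therefore receives only three form fields: slot, isHalf, and params, with the per-model file names resolved server-side from params["files"]. A rough request sketch, assuming an illustrative server URL and file key (neither is taken from this diff):

    // Hypothetical request sketch (not part of this commit); values are placeholders.
    const serverUrl = "http://localhost:18888" // placeholder; the real value comes from the client settings
    const formData = new FormData()
    formData.append("slot", "0")
    formData.append("isHalf", "false")
    formData.append("params", JSON.stringify({
        trans: 0,
        files: { pyTorchModelFilename: "model.pth" }, // key name is an assumption
    }))
    await fetch(serverUrl + "/load_model", { method: "POST", body: formData })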

View File

@@ -16,5 +16,4 @@ class FilePaths:
 class LoadModelParams:
     slot: int
     isHalf: bool
-    files: FilePaths
     params: Any