Example #1
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from load_dataset import data_generator


def mean_iou(y_true, y_pred):
    # Mean IoU averaged over thresholds 0.5-0.95 (step 0.05), computed with
    # the TF 1.x running metric tf.metrics.mean_iou for two classes.
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
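
# A minimal sketch (an assumption, not taken from the original script) of how a
# segmentation model would typically be compiled with mean_iou as its metric;
# compile_with_mean_iou is a hypothetical helper, and the optimizer and loss
# below are placeholders.
def compile_with_mean_iou(model):
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=[mean_iou])
    return model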


model_dir = '../../100sw_model'
model_path = '../../100sw_model/AV_100sw.h5'
test_dataset = '../../pre_data/audio_video/tr_set.hdf5'
test_generator = data_generator(test_dataset, 1)
# Draw a single batch: the inputs are the mixed spectrogram and the two face
# streams, the targets are the two clean per-speaker spectrograms.
[input_spec_mix, input_face_1,
 input_face_2], [output_spec_1, output_spec_2] = next(test_generator)
converter = MelConverter()
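# mean_iou and tf are passed via custom_objects so Keras can deserialize the
# saved model (tf is typically needed here because it is referenced inside
# Lambda layers).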
AV = load_model(model_path, custom_objects={'tf': tf, 'mean_iou': mean_iou})
#############################################################
import librosa


def spectrogram_separator(input_spec_mix, input_face_1, input_face_2,
                          output_spec_1, output_spec_2):
    # Predict the two soft spectrogram masks from the mixed spectrogram and
    # the two face-embedding streams; the excerpt cuts the function off here.
    mask_pre_1, mask_pre_2 = AV.predict(
        [input_spec_mix, input_face_1, input_face_2])
    #    mask_sum = mask_pre_1 + mask_pre_2
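

# A minimal sketch (an assumption, not part of the original script) of how the
# predicted soft masks could be applied to the mixture spectrogram to recover
# per-speaker spectrograms; apply_masks is a hypothetical helper, and turning
# the results back into waveforms would go through MelConverter, whose API is
# not shown in this excerpt.
def apply_masks(input_spec_mix, mask_pre_1, mask_pre_2):
    # Element-wise masking; assumes the masks and the mixture share one shape.
    spec_1 = np.squeeze(input_spec_mix) * np.squeeze(mask_pre_1)
    spec_2 = np.squeeze(input_spec_mix) * np.squeeze(mask_pre_2)
    return spec_1, spec_2
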
Example #2

import os
import h5py
import keras
import keras.layers as layers
import tensorflow as tf
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, TensorBoard
from load_dataset import data_generator

# Bind the TensorFlow session to the Keras backend; `config` is a
# tf.ConfigProto defined earlier in the original script (not shown here).
sess = tf.Session(config=config)
keras.backend.set_session(sess)

train_dataset = '../../new_dataset/audio_video/tr_set.hdf5'
val_dataset = '../../new_dataset/audio_video/val_set.hdf5'
test_dataset = '../../new_dataset/audio_video/test_set.hdf5'

batch_size = 10
epochs = 100

train_generator = data_generator(train_dataset, batch_size)
val_generator = data_generator(val_dataset, batch_size)

## V*: layers for video stream 1; VV*: layers for video stream 2.
## Each video input is a sequence of 75 time steps of 512-dimensional features.
V1 = layers.Input(shape=(75, 512), name='Video1_input')
V2 = layers.Conv1D(256,
                   kernel_size=7,
                   dilation_rate=1,
                   padding='same',
                   activation='relu')(V1)
V3 = layers.BatchNormalization(axis=-1)(V2)
V4 = layers.Conv1D(256,
                   kernel_size=5,
                   dilation_rate=1,
                   padding='same',
                   activation='relu')(V3)
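
# The excerpt is cut off here. As a hedged sketch (an assumption, not the
# original architecture), the Conv1D/BatchNorm pattern above would typically
# continue with growing dilation rates before this stream is merged with the
# audio branch:
V5 = layers.BatchNormalization(axis=-1)(V4)
V6 = layers.Conv1D(256,
                   kernel_size=5,
                   dilation_rate=2,
                   padding='same',
                   activation='relu')(V5)
V7 = layers.BatchNormalization(axis=-1)(V6)
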
Example #3
"""
@author: macfa
"""

h5py_path = '../../Unetdata/dataset'
tr_path = h5py_path + '/' + 'tr_set.hdf5'
val_path = h5py_path + '/' + 'val_set.hdf5'
test_path = h5py_path + '/' + 'test_set.hdf5'


batch_size = 10
################################DATA
from keras.layers import Lambda
import numpy as np
from load_dataset import data_generator

train_generator = data_generator(tr_path, batch_size)
val_generator = data_generator(val_path, batch_size)
test_generator = data_generator(test_path, batch_size)

##############################DATA
##############################Model
import os
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras import backend as K
import tensorflow as tf
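
# The network definition is cut off after these imports. A minimal sketch (an
# assumption, not the original model) of the kind of U-Net contracting block
# they are normally combined into; the input shape, filter counts and dropout
# rate below are placeholders:
inputs = Input(shape=(128, 128, 1))
c1 = Conv2D(16, (3, 3), activation='relu', padding='same')(inputs)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(16, (3, 3), activation='relu', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)
# Further contracting blocks, a bottleneck, and expanding blocks built from
# Conv2DTranspose + concatenate would follow in a full U-Net.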