from keras import backend
from attention_model import AttentionLayer
from sklearn.utils import shuffle
import numpy as np
#import data_preprocessing
from data_preprocessing import data

batch_size = 32
epo = [10, 10, 10]  # epochs for the audio, text, and fusion stages
flag = 0
numclass = 10

result_t, result_a = [], []

##### loading training testing data here
gakki = data(path=r'E:\Yue\Entire Data\CNMC\hospital_data')
saving_path = r'E:\Yue\Entire Data\CNMC'
gakki.auto_process()
test_label, test_text, test_audio_left, test_audio_right = gakki.get_tester()
train_label, train_text, train_audio_left, train_audio_right = gakki.get_trainer()

# define the operations


def weight_expand(x):
    return backend.expand_dims(x)


def weight_dot(inputs):
    # element-wise product of a feature tensor and its attention weights
    return inputs[0] * inputs[1]
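

# The listing cuts off here. Below is a minimal sketch (not part of the original) of how
# weight_expand / weight_dot are typically wrapped in Lambda layers to apply per-timestep
# attention weights; the input shapes (30 timesteps, 64 features) are placeholders.
from keras.layers import Input, Dense, Lambda, Flatten

frames = Input(shape=(30, 64))                               # hypothetical sequence input
scores = Dense(30, activation='softmax')(Flatten()(frames))  # one weight per timestep, shape (batch, 30)
scores = Lambda(weight_expand)(scores)                       # (batch, 30) -> (batch, 30, 1)
weighted = Lambda(weight_dot)([frames, scores])              # broadcasted element-wise re-weighting
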
import numpy as np
from data_preprocessing import data

batch_size = 32
epo = 100
numclass = 7

result_t, result_a = [], []

# loading training testing data here
"""
fill the data processing in this part
"""

path = r'E:/Yue/Entire Data/CNMC/result/'
lay = data(path)
train_context, train_ori = lay.get_train_data(path)
test_context, test_ori = lay.get_test_data()
test_label, test_text, test_audio_left, test_audio_right = lay.get_tester()
train_label, train_text, train_audio_left, train_audio_right = lay.get_trainer()

# define the operations


def fusion_weight_expand(x):
    # stack the audio (x[0]) and text (x[1]) feature vectors into a (2, 512) array
    a = np.zeros((1, 512), dtype='float32')
    a[0:] = x[0]
    t = np.zeros((1, 512), dtype='float32')
    t[0:] = x[1]
    r = np.concatenate((a, t))
    return r
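

# The original builds the fused array in NumPy. For reference only (an assumption, not from
# the original), the same stacking can be done on tensors inside the graph with the Keras backend:
from keras import backend
from keras.layers import Input, Lambda


def fusion_stack(inputs):
    # stack the audio and text vectors along a new axis: 2 x (batch, 512) -> (batch, 2, 512)
    audio, text = inputs
    return backend.stack([audio, text], axis=1)


audio_vec = Input(shape=(512,))   # hypothetical audio branch output
text_vec = Input(shape=(512,))    # hypothetical text branch output
fused = Lambda(fusion_stack)([audio_vec, text_vec])
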
Example #3
from __future__ import print_function
from keras.models import Model
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import GlobalMaxPooling1D
from keras.optimizers import Adam
from data_preprocessing import data
from transformer import Attention
from transformer import Position_Embedding

# Parameter setting
gakki = data(path=r'E:/Yue/Entire Data/CNMC/hospital_data')
saving_path = r'E:/Yue/Entire Data/CNMC/'

gakki.unclear_lbl.append('Monitor Vital Signs')
gakki.auto_process(merge_unclear=True)
gakki.label_mode = 'lower_10'
num_class = 11

epoch = 2000
batch_size = 32
head_num = 8
head_size = 16

# Model Architecture
# Text data
# define text input and shape
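# The listing breaks off before the text branch is built. A minimal sketch of how the imported
# layers could be wired follows; the vocabulary size, sequence length, and embedding size are
# placeholders, and the Attention / Position_Embedding call signatures (from the custom
# transformer module) are assumptions.
text_input = Input(shape=(30,), name='text_input')
em_text = Embedding(10000, 128)(text_input)                  # hypothetical vocab / embedding size
em_text = Position_Embedding()(em_text)                      # add positional information
text_att = Attention(head_num, head_size)([em_text, em_text, em_text])  # multi-head self-attention
text_att = GlobalMaxPooling1D()(text_att)
text_att = Dropout(0.5)(text_att)
text_prediction = Dense(num_class, activation='softmax')(text_att)

text_model = Model(inputs=text_input, outputs=text_prediction)
text_model.compile(loss='categorical_crossentropy',
                   optimizer=Adam(lr=0.0001),
                   metrics=['accuracy'])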
Example #4
'''
@Ruiyu
2019.01.30
ToDo: Text branch only, hi mode RNN
'''
from data_preprocessing import data

## TRAINING PARAMS
batch_size = 32
epoch_count = 300
acc_flag_threshould = 60  # accuracy threshold used to flag in-training effects; optional
acc_collection = []  # all accuracies
work_path = 'D:/CNMC/hospital_data'
saving_path = 'D:/CNMC'
saving_name = ['/result/train_text.mat', '/result/test_text.mat']
label_mode = 'lower_10'

## LOAD DATA
cirno = data(path=work_path)  # all train/test data
cirno.auto_process(merge_unclear=True)
cirno.label_mode = label_mode
if label_mode == 'lower_10':
    numclass = 11
elif label_mode == 'h':
    numclass = len(cirno.label_dic_h)
elif label_mode == 'm':
    numclass = len(cirno.label_dic_m)
elif label_mode == 'l':
    numclass = len(cirno.label_dic_l)
else:
    numclass = len(cirno.trainer_lbl_statistics) - len(cirno.unclear_lbl) + 4
    print('>!>Warning, unknown label mode')

from data_preprocessing import data

analysis = data(path=r'E:/Yue/Entire Data/CNMC/hospital_data')
analysis.unclear_lbl.append('Monitor Vital Signs')
analysis.auto_process(merge_unclear=False)
import random
import pyexcel as pe
"""
@Ruiyu
1017 update
Trying to load data each epoch with labels evenly distributed across classes, especially the "NULL"/"NA" label.
"""
batch_size = 32
epo = [2, 100, 100]  # epochs for the audio, text, and fusion stages
flag = 0
numclass = 7

result_t, result_a = [], []

##### loading training testing data here
gakki = data(path=r'/Volumes/Detchue Base II/731/CNMC/hospital_data')
saving_path = r'/Volumes/Detchue Base II/731/CNMC'
output_data = ['/result/train_audio.mat', '/result/train_text.mat', '/result/test_audio.mat', '/result/test_text.mat']

gakki.unclear_lbl.append('Monitor Vital Signs')
gakki.auto_process(merge_unclear=True)
gakki.label_mode = 'lower_10'

test_label, test_text, test_audio_left, test_audio_right = gakki.get_tester(average=True)
train_label, train_text, train_audio_left, train_audio_right = gakki.get_trainer()
numclass = len(gakki.trainer_lbl_statistics) - len(gakki.unclear_lbl) + 4

numclass = 11  # specialized for the lower_10 mode; delete this override if not using that mode


# define the operations
from sklearn.metrics import confusion_matrix
from keras import backend
from keras.layers import Input
from data_preprocessing import data
'''
@Ruiyu
2019.02.18
ToDo: complete confusion matrix helper tool
'''


def get_text_confusion_matrix(model, test_text, test_label):
    pass
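

# A possible implementation of the helper above -- a sketch only, assuming the model's
# predictions and test_label are both one-hot encoded over numclass classes:
def get_text_confusion_matrix_sketch(model, test_text, test_label):
    predicted = model.predict(test_text).argmax(axis=-1)  # predicted class indices
    true = test_label.argmax(axis=-1)                     # ground-truth class indices
    return confusion_matrix(true, predicted)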


if __name__ == "__main__":
    saving_path = '/Volumes/Detchue Base II/731/CNMC'
    work_path = '/Volumes/Detchue Base II/731/CNMC/hospital_data'
    cirno = data(path=work_path)
    cirno.label_mode = 'lower_10'
    numclass = 11

    ####################################################### MODEL ##################################
    ## TEXT MODEL
    def weight_expand(x):
        return backend.expand_dims(x)

    def weight_dot(inputs):
        return inputs[0] * inputs[1]


    # input and its shape
    text_input = Input(shape=(30, ), name='ph1_input')
    # word embedding
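    # The listing ends at the word-embedding comment. A minimal sketch of how the text-only
    # RNN branch might continue, using the weight_expand / weight_dot helpers above; the
    # vocabulary size, embedding size, and GRU width are placeholders, not from the original.
    from keras.layers import Embedding, Bidirectional, GRU, Dense, Flatten, Activation, Lambda
    from keras.models import Model

    em_text = Embedding(10000, 128)(text_input)            # hypothetical vocab / embedding size
    text_rnn = Bidirectional(GRU(64, return_sequences=True))(em_text)
    att_score = Dense(1, activation='tanh')(text_rnn)      # (batch, 30, 1)
    att_score = Flatten()(att_score)                       # (batch, 30)
    att_score = Activation('softmax')(att_score)           # attention weights over timesteps
    att_score = Lambda(weight_expand)(att_score)           # (batch, 30, 1)
    text_att = Lambda(weight_dot)([text_rnn, att_score])   # weighted hidden states
    text_vec = Lambda(lambda x: backend.sum(x, axis=1))(text_att)
    text_prediction = Dense(numclass, activation='softmax')(text_vec)

    text_model = Model(inputs=text_input, outputs=text_prediction)
    text_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])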
Example #8
import numpy as np
import matplotlib.pyplot as plt
import random
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Activation, Flatten, Dropout
from tensorflow.keras.models import Sequential
from data_preprocessing import data
from tensorflow.keras.callbacks import TensorBoard
import time

m_name = "Dev-{}".format(int(time.time()))
tb = TensorBoard(log_dir='logs/{}'.format(m_name))

x_train, y_train, labels_id = data('train')
x_test, y_test, tlabels_id = data('test')

rec_labels = {v: k for k, v in labels_id.items()}

#plt.imshow(x_train[7])
#title=rec_labels[y_train[7]]
#plt.title(title)
#plt.show()

x_train = np.array(x_train).reshape(-1, 32, 32, 1)
x_test = np.array(x_test).reshape(-1, 32, 32, 1)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=46)
y_train = tf.keras.utils.to_categorical(y_train, num_classes=46)

x_train = x_train.astype("float") / 255.0
x_test = x_test.astype("float") / 255.0

model = Sequential()
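
# The listing stops after the Sequential() call. A minimal sketch of a CNN matching the
# imported layers follows as an assumption, not the original architecture; only the 32x32x1
# input shape and the 46 classes come from the code above, and the epoch count is a placeholder.
model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(46))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=32,
          epochs=10,                          # placeholder epoch count
          validation_data=(x_test, y_test),
          callbacks=[tb])                     # log training curves to TensorBoard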