Example #1
def init_model_lstm_stateful(units, layers):

    nn_config = cfg_keras.get_nn_config()
    regularizer_config = cfg.get_regularizer_config()

    # These parameters should come from a configuration file instead of being passed as arguments
    model = Sequential()

    #model.add(LSTM(look_back*mult_cell,batch_input_shape=(batch_size,time_steps,look_back),
    model.add(
        LSTM(units,
             batch_input_shape=(nn_config['batch_size'],
                                nn_config['time_steps'],
                                nn_config['input_dim']),
             activation=nn_config['activation_function'],
             go_backwards=nn_config['go_backwards'],
             recurrent_activation=nn_config['recurrent_activation'],
             return_sequences=True,
             use_bias=nn_config['use_bias'],
             kernel_initializer=nn_config['kernel_initializer'],
             recurrent_initializer=nn_config['recurrent_initializer'],
             unit_forget_bias=nn_config['unit_forget'],
             recurrent_dropout=nn_config['recurrent_dropout'],
             kernel_regularizer=regularizers.l2(regularizer_config['l2']),
             activity_regularizer=regularizers.l1(regularizer_config['l1']),
             stateful=True))

    model.add(
        LSTM(units,
             batch_input_shape=(nn_config['batch_size'],
                                nn_config['time_steps'],
                                nn_config['input_dim']),
             activation=nn_config['activation_function'],
             go_backwards=nn_config['go_backwards'],
             recurrent_activation=nn_config['recurrent_activation'],
             return_sequences=False,
             use_bias=nn_config['use_bias'],
             kernel_initializer=nn_config['kernel_initializer'],
             recurrent_initializer=nn_config['recurrent_initializer'],
             unit_forget_bias=nn_config['unit_forget'],
             recurrent_dropout=nn_config['recurrent_dropout'],
             stateful=True))

    model.add(
        Dense(1,
              activation=nn_config['activation_function'],
              use_bias=nn_config['use_bias']))
    optimizer_config = cfg.get_optimizer_config()
    optimizer = optimizers.RMSprop(lr=optimizer_config['lr'],
                                   rho=optimizer_config['rho'],
                                   epsilon=optimizer_config['epsilon'],
                                   decay=optimizer_config['decay'])

    start = time.time()
    #model.compile(loss=loss, optimizer=optimizer,metrics=['mae'])
    compiler_config = cfg.get_compiler_config()
    model.compile(loss=compiler_config['loss'], optimizer=optimizer)
    print("Compilation Time: ", time.time() - start)
    return model
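
A hedged usage sketch (trainX/trainY/testX below are placeholders for your prepared arrays): with stateful=True, fit and predict must use the fixed batch_size baked into batch_input_shape, and the carried-over cell state should be reset between epochs.

model = init_model_lstm_stateful(units=8, layers=2)
nn_config = cfg_keras.get_nn_config()
for epoch in range(10):
    # shuffle=False preserves the sequence order the carried-over state relies on
    model.fit(trainX, trainY, batch_size=nn_config['batch_size'],
              epochs=1, shuffle=False)
    model.reset_states()  # clear the LSTM state between epochs
prediction = model.predict(testX, batch_size=nn_config['batch_size'])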
Example #2
def step_test(model, step):
    nn_config = cfg_keras.get_nn_config()
    step = np.reshape(step, (len(step), 1))
    step = create_timesteps(step, nn_config['time_steps'])
    step = np.reshape(
        step, (step.shape[0], nn_config['time_steps'], nn_config['input_dim']))
    prediction = model.predict(step)
    return prediction
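
A minimal call sketch (the step length is arbitrary and 'model' is a placeholder; assumes a non-stateful model such as the one built by init_model_lstm below):

step_signal = np.concatenate((np.zeros(100), np.ones(100)))  # unit step stimulus
response = step_test(model, step_signal)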
Example #3
def init_model_lstm(units=8, layers=2):

    nn_config = cfg_keras.get_nn_config()
    regularizer_config = cfg.get_regularizer_config()

    model = Sequential()
    return_sequences = True
    for layer_idx in range(layers):
        if layer_idx == layers - 1:
            return_sequences = False
        model.add(
            LSTM(
                units,
                input_shape=(nn_config['time_steps'], nn_config['input_dim']),
                #model.add(LSTM(nn_config['units'],input_shape=(nn_config['input_dim'],nn_config['time_steps']),
                activation=nn_config['activation_function'],
                go_backwards=nn_config['go_backwards'],
                recurrent_activation=nn_config['recurrent_activation'],
                return_sequences=return_sequences,
                use_bias=nn_config['use_bias'],
                kernel_initializer=nn_config['kernel_initializer'],
                recurrent_initializer=nn_config['recurrent_initializer'],
                unit_forget_bias=nn_config['unit_forget'],
                recurrent_dropout=nn_config['recurrent_dropout'],
                kernel_regularizer=regularizers.l2(regularizer_config['l2']),
                activity_regularizer=regularizers.l1(
                    regularizer_config['l1'])))

    model.add(
        Dense(nn_config['output'],
              activation=nn_config['activation_function'],
              use_bias=nn_config['use_bias']))

    optimizer_config = cfg.get_optimizer_config()
    optimizer = optimizers.RMSprop(lr=optimizer_config['lr'],
                                   rho=optimizer_config['rho'],
                                   epsilon=optimizer_config['epsilon'],
                                   decay=optimizer_config['decay'])

    start = time.time()
    compiler_config = cfg.get_compiler_config()
    model.compile(loss=compiler_config['loss'], optimizer=optimizer)
    #model.compile(loss=losses.log_loss, optimizer=optimizer)
    print("Compilation Time: ", time.time() - start)

    return model
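
A quick sanity-check sketch for the builder above (assumes the config modules it reads are importable):

model = init_model_lstm(units=8, layers=2)
model.summary()  # the last LSTM layer has return_sequences=False, so Dense receives a 2-D tensor
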
# NOTE (assumption): the def line of this function was lost when the snippet was
# extracted; the signature below is inferred from the variables used in the body.
def create_step_stimulus(left_escalon, right_escalon, smoth_step_stimulus,
                         frecuency, cutoff_frecuency, filter_order,
                         samples_stabilisation, samples_total):
    escalon = np.concatenate((left_escalon, right_escalon), axis=0)

    if smoth_step_stimulus:
        print("entered smoothing branch")
        wn = cutoff_frecuency / (frecuency / 2)  # cutoff normalized by the Nyquist frequency
        b, a = signal.butter(filter_order, wn)
        escalon = signal.lfilter(b, a, escalon)

    if samples_stabilisation > 0:
        escalon = escalon[int(samples_stabilisation):int(samples_total) - 1]

    return np.asarray(escalon)
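
For reference, signal.butter expects the cutoff normalized by the Nyquist frequency, which is exactly what wn = cutoff_frecuency / (frecuency / 2) computes above. A small numeric sketch (the sampling and cutoff values are illustrative only):

from scipy import signal
import numpy as np

fs = 5.0                     # sampling frequency in Hz (illustrative)
cutoff = 0.5                 # desired cutoff in Hz (illustrative)
wn = cutoff / (fs / 2.0)     # normalized cutoff, must lie in (0, 1)
b, a = signal.butter(4, wn)  # 4th-order low-pass Butterworth
smoothed = signal.lfilter(b, a, np.ones(100))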


Example #4
def plot_step_test(prediction, abp, subject, units, layers, case, order, name):
    nn_config = cfg_keras.get_nn_config()
    files_save = cfg.get_files_save(subject, units, layers, case, order,
                                    nn_config)
    # ABP (with delays) and the prediction are written to files_save['file_step_txt']

    print(len(abp))
    print(len(prediction))

    with open(files_save['file_step_txt'], "w") as f:
        for i in range(0, len(prediction)):
            f.write(str(abp[i]))
            f.write("\t")
            f.write(str(prediction[i, 0]))
            f.write("\n")
Example #5
def process_subjects():

    # Matrices for the plots
    collapse_final_matrix_2gates = []
    collapse_final_matrix_2gates_flat = []
    collapse_final_matrix_2gates_mg = []

    collapse_final_matrix_3gates = []
    collapse_final_matrix_3gates_flat = []
    collapse_final_matrix_3gates_mg = []

    collapse_final_matrix_4gates = []
    collapse_final_matrix_4gates_flat = []
    collapse_final_matrix_4gates_mg = []

    label_final_matrix = []

    grid_config = cfg.get_grid_config()
    path_subjects = grid_config['path_subjects']
    #subjects = get_subjects("../Subjects_long_simulated/")
    subjects = get_subjects(path_subjects)
    i = 0
    print(subjects)
    #for subject in subjects[1:len(subjects)]:
    for subject in subjects:
        if subject != ".DS_Store":
            print("################################################")
            print("SUBJECT " + str(i))
            print("################################################")
            print()
            files = get_subject_files(path_subjects, subject)
            print(files)
            orders, cases = get_parameters(files)
            print(orders)
            print(cases)

            #push subject to queue
            global_queue.push_actual_subject(subject,
                                             global_queue.subjects_list)
            (collapse_matrix_2gates, collapse_matrix_2gates_flat,
             collapse_matrix_2gates_mg, collapse_matrix_3gates,
             collapse_matrix_3gates_flat, collapse_matrix_3gates_mg,
             collapse_matrix_4gates, collapse_matrix_4gates_flat,
             collapse_matrix_4gates_mg,
             label_matrix) = two_recurrent_layers.two_recurrent_layers(
                 subject, cases, orders)
            global_queue.pop_actual_subject(global_queue.subjects_list)
            #pop subject from queue

            collapse_final_matrix_2gates = create_collapse_final_matrix(
                collapse_final_matrix_2gates, collapse_matrix_2gates)
            collapse_final_matrix_2gates_flat = create_collapse_final_matrix(
                collapse_final_matrix_2gates_flat, collapse_matrix_2gates_flat)
            collapse_final_matrix_2gates_mg = create_collapse_final_matrix(
                collapse_final_matrix_2gates_mg, collapse_matrix_2gates_mg)

            collapse_final_matrix_3gates = create_collapse_final_matrix(
                collapse_final_matrix_3gates, collapse_matrix_3gates)
            collapse_final_matrix_3gates_flat = create_collapse_final_matrix(
                collapse_final_matrix_3gates_flat, collapse_matrix_3gates_flat)
            collapse_final_matrix_3gates_mg = create_collapse_final_matrix(
                collapse_final_matrix_3gates_mg, collapse_matrix_3gates_mg)

            collapse_final_matrix_4gates = create_collapse_final_matrix(
                collapse_final_matrix_4gates, collapse_matrix_4gates)
            collapse_final_matrix_4gates_flat = create_collapse_final_matrix(
                collapse_final_matrix_4gates_flat, collapse_matrix_4gates_flat)
            collapse_final_matrix_4gates_mg = create_collapse_final_matrix(
                collapse_final_matrix_4gates_mg, collapse_matrix_4gates_mg)

            label_final_matrix = create_collapse_final_matrix(
                label_final_matrix, label_matrix)

            i += 1

    nn_config = cfg_keras.get_nn_config()

    df_1 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_2gates, label_final_matrix)
    df_2 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_2gates_flat, label_final_matrix)

    df_4 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_3gates, label_final_matrix)
    df_5 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_3gates_flat, label_final_matrix)

    df_7 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_4gates, label_final_matrix)
    df_8 = mds_visualization.reduction_dimensionality_2(
        collapse_final_matrix_4gates_flat, label_final_matrix)

    # Save the weight matrices
    save_weights_matrix(collapse_final_matrix_2gates, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "2gates_aritmetic.txt")
    save_weights_matrix(collapse_final_matrix_2gates_flat, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "2gates_flat.txt")

    save_weights_matrix(collapse_final_matrix_3gates, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "3gates_aritmetic.txt")
    save_weights_matrix(collapse_final_matrix_3gates_flat, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "3gates_flat.txt")

    save_weights_matrix(collapse_final_matrix_4gates, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "4gates_aritmetic.txt")
    save_weights_matrix(collapse_final_matrix_4gates_flat, "../Weight_matrix/",
                        nn_config['units'], nn_config['layers'],
                        "4gates_flat.txt")

    save_label_matrix(label_final_matrix, "../Weight_matrix/",
                      nn_config['units'], nn_config['layers'],
                      "label_matrix.txt")

    mds_visualization.visualization(df_1,
                                    "distancia_euclidean_2gates_aritmetic")
    mds_visualization.visualization(df_2, "distancia_euclidean_2gates_flat")

    mds_visualization.visualization(df_4,
                                    "distancia_euclidean_3gates_aritmetic")
    mds_visualization.visualization(df_5, "distancia_euclidean_3gates_flat")

    mds_visualization.visualization(df_7,
                                    "distancia_euclidean_4gates_aritmetic")
    mds_visualization.visualization(df_8, "distancia_euclidean_4gates_flat")
Example #6
def train_test_data():
    from sklearn.preprocessing import MinMaxScaler
    import config as cfg
    import config_keras as cfg_keras
    from data_utils import lectura, lineal_interpolation, create_timesteps, fold_order
    import numpy as np

    subject = global_queue.get_actual_subject(global_queue.subjects_list)
    case = global_queue.get_actual_case(global_queue.cases_list)
    order = global_queue.get_actual_order(global_queue.orders_list)

    print("in train test data case", case)
    print("in train test data subject", subject)

    grid_config = cfg.get_grid_config()
    files_config = cfg.get_files_config(grid_config['path_subjects'], subject,
                                        case)
    nn_config = cfg_keras.get_nn_config()

    # Read the training and test data
    data_train = lectura(files_config['filename_train'])
    data_test = lectura(files_config['filename_test'])

    # Convert the training and test data into float arrays
    time_train, cbfv_train, abp_train = np.split(data_train, 3, axis=1)
    time_train = np.asarray(time_train, dtype=np.float64)
    cbfv_train = np.asarray(cbfv_train, dtype=np.float64)
    abp_train = np.asarray(abp_train, dtype=np.float64)

    time_test, cbfv_test, abp_test = np.split(data_test, 3, axis=1)
    cbfv_test = np.asarray(cbfv_test, dtype=np.float64)
    abp_test = np.asarray(abp_test, dtype=np.float64)

    sampling_time = time_train[1, 0] - time_train[0, 0]
    print("data sampling_time", sampling_time)
    if sampling_time == 0.2:
        # Apply linear interpolation to the data
        cbfv_train = lineal_interpolation(cbfv_train)
        abp_train = lineal_interpolation(abp_train)

        cbfv_test = lineal_interpolation(cbfv_test)
        abp_test = lineal_interpolation(abp_test)

    # Normalize the dataset to ease gradient convergence
    scaler_cbfv = MinMaxScaler(feature_range=(0, 1))
    cbfv_train = scaler_cbfv.fit_transform(cbfv_train)
    cbfv_test = scaler_cbfv.transform(cbfv_test)  # reuse the scale fitted on the training set

    scaler_abp = MinMaxScaler(feature_range=(0, 1))
    abp_train = scaler_abp.fit_transform(abp_train)
    abp_test = scaler_abp.transform(abp_test)

    # reshape into X=t and Y=t+1
    trainX = create_timesteps(abp_train, nn_config['time_steps'])
    trainY = cbfv_train[nn_config['time_steps'] - 1:len(cbfv_train) - 2, 0]

    testX = create_timesteps(abp_test, nn_config['time_steps'])
    testY = cbfv_test[nn_config['time_steps'] - 1:len(cbfv_test) - 2, 0]

    # reshape_option = 1 means the delays are used as time_steps; otherwise the delays correspond to the data dimension

    # Depending on the fold order, the train and test sets are swapped
    trainX, testX, trainY, testY = fold_order(trainX, testX, trainY, testY,
                                              order, nn_config)

    print("data processing finished")
    return trainX, testX, trainY, testY, sampling_time, scaler_cbfv, scaler_abp
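
Note on the scalers above: fit_transform is applied to the training series and plain transform to the test series, so both share the scale learned from training. A standalone sketch of that pattern (values are illustrative):

from sklearn.preprocessing import MinMaxScaler
import numpy as np

scaler = MinMaxScaler(feature_range=(0, 1))
train = scaler.fit_transform(np.array([[60.0], [80.0], [100.0]]))  # learns min=60, max=100
test = scaler.transform(np.array([[90.0]]))                        # -> [[0.75]] on the training scale
physical = scaler.inverse_transform(test)                          # back to [[90.0]]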
Example #7
def visualization(df, name_image):
    groups = df.groupby('label')
    #cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
    colors = [
        '#B0171F', '#FF00FF', '#0000FF', '#C6E2FF', '#00FA9A', '#FFFF00',
        '#FFA500', '#030303', '#FCFCFC', '#FFBBFF'
    ]
    cluster_colors = {}
    i = 0

    for name in df['label']:
        if name == "11_recurrent_kernel":
            cluster_colors[name] = colors[0]
        elif name == "19_recurrent_kernel":
            cluster_colors[name] = colors[1]
        elif name == "55_recurrent_kernel":
            cluster_colors[name] = colors[2]
        elif name == "91_recurrent_kernel":
            cluster_colors[name] = colors[3]
        else:
            cluster_colors[name] = colors[4]

    cluster_names = {}
    for name in df['label']:
        cluster_names[name] = name

    #cluster_names = {0: '19',1: '55',2: '91'}
    fig, ax = plt.subplots(figsize=(25, 15))  # set size
    ax.margins(0.05)  # Optional, just adds 5% padding to the autoscaling
    for name, group in groups:
        ax.plot(group.x,
                group.y,
                marker='o',
                linestyle='',
                ms=12,
                label=cluster_names[name],
                color=cluster_colors[name],
                mec='none')
        ax.set_aspect('auto')

    ax.legend(numpoints=1)  #show legend with only 1 point
    for i in range(len(df)):
        ax.text(df.iloc[i]['x'], df.iloc[i]['y'], df.iloc[i]['label'], size=8)
    fig_name = name_image + ".png"
    ax.set_xticks(np.arange(-6, 6, 0.1))
    ax.set_yticks(np.arange(-6, 6, 0.1))
    plt.grid(True)

    #Get path to save MDS plot
    path = os.getcwd()
    path, last_dir = os.path.split(path)
    path = path + "/MDS/"

    nn_config = cfg_keras.get_nn_config()
    dir_save = path + "/" + "layers_" + str(
        nn_config['layers']) + "_units_" + str(nn_config['units'])

    if not os.path.exists(dir_save):
        os.makedirs(dir_save)

    path_mds_plot = dir_save + "/" + "mds" + "_" + str(
        nn_config['layers']) + "_" + str(nn_config['units']) + "_" + fig_name

    plt.savefig(path_mds_plot)  # save the plot
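
A hedged usage sketch (the coordinates and labels are invented): visualization expects a DataFrame with x, y, and label columns, as produced by reduction_dimensionality_2 above.

import pandas as pd

df = pd.DataFrame({'x': [0.1, -0.3, 0.7],
                   'y': [0.5, 0.2, -0.4],
                   'label': ['11_recurrent_kernel',
                             '19_recurrent_kernel',
                             '55_recurrent_kernel']})
visualization(df, "demo_mds")  # saves a PNG under ../MDS/layers_<layers>_units_<units>/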
Example #8
def model_lstm(trainX, testX, trainY, testY, sampling_time, scaler_cbfv,
               scaler_abp):

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LSTM
    from keras import regularizers
    import config as cfg
    import config_keras as cfg_keras
    from data import train_test_data
    import time
    import metrics
    import global_queue
    from keras import optimizers
    import math
    import model_fit  # needed for model_fit_for_search below
    from hyperopt import STATUS_OK  # needed for the return dict below

    trainX, testX, trainY, testY, sampling_time, scaler_cbfv, scaler_abp = \
        train_test_data()

    nn_config = cfg_keras.get_nn_config()
    regularizer_config = cfg.get_regularizer_config()
    stateful = False

    model = Sequential()
    return_sequences = True
    for layer_idx in range(nn_config['layers']):
        if layer_idx == nn_config['layers'] - 1:
            return_sequences = False
        model.add(
            LSTM(nn_config['units'],
                 input_shape=(nn_config['time_steps'], nn_config['input_dim']),
                 activation={{choice(['hard_sigmoid', 'sigmoid', 'tanh'])}},
                 go_backwards=nn_config['go_backwards'],
                 recurrent_activation={{
                     choice(['hard_sigmoid', 'sigmoid', 'tanh'])
                 }},
                 return_sequences=return_sequences,
                 use_bias=nn_config['use_bias'],
                 kernel_initializer=nn_config['kernel_initializer'],
                 recurrent_initializer=nn_config['recurrent_initializer'],
                 unit_forget_bias=nn_config['unit_forget'],
                 recurrent_dropout=nn_config['recurrent_dropout'],
                 kernel_regularizer=regularizers.l2(regularizer_config['l2']),
                 activity_regularizer=regularizers.l1(
                     regularizer_config['l1'])))

    model.add(
        Dense(nn_config['output'],
              activation=nn_config['activation_function'],
              use_bias=nn_config['use_bias']))

    start = time.time()
    compiler_config = cfg.get_compiler_config()

    adam = optimizers.Adam(lr={{choice([10**-2, 10**-1])}})
    rmsprop = optimizers.RMSprop(lr={{choice([10**-3, 10**-2, 10**-1])}})

    choiceval = {{choice(['adam', 'rmsprop'])}}
    if choiceval == 'adam':
        optim = adam
    else:
        optim = rmsprop

    model.compile(loss=compiler_config['loss'], optimizer=optim)

    print("Compilation Time: ", time.time() - start)
    model = model_fit.model_fit_for_search(model, trainX, trainY, testX, testY,
                                           nn_config['units'],
                                           nn_config['layers'], stateful)
    if not stateful:
        train_predict = model.predict(trainX)
        test_predict = model.predict(testX)

    else:
        fit_config = cfg.get_fit_config()
        train_predict = model.predict(trainX,
                                      batch_size=fit_config['batch_size'])
        test_predict = model.predict(testX,
                                     batch_size=fit_config['batch_size'])

    train_predict = scaler_cbfv.inverse_transform(train_predict)
    trainY = scaler_cbfv.inverse_transform([trainY])
    test_predict = scaler_cbfv.inverse_transform(test_predict)
    testY = scaler_cbfv.inverse_transform([testY])

    corr_train, corr_test = metrics.correlations(train_predict, trainY,
                                                 test_predict, testY)
    if math.isnan(corr_test[0, 1]):
        corr_test[0, 1] = 0.5
    print('Test corr:', corr_test[0, 1])
    return {'loss': -corr_test[0, 1], 'status': STATUS_OK, 'model': model}
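
The double-brace {{choice([...])}} markers are hyperas template syntax, so this function is meant to be handed to hyperas rather than called directly. A minimal driver sketch (assuming model_lstm and train_test_data live in an importable module):

from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=model_lstm,
                                      data=train_test_data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print("best hyperparameters:", best_run)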
Example #9
def two_recurrent_layers(subject, cases, orders):
    #cases = ["11","19","55","91","99"]
    #orders = ["1","2"]
    nn_config = cfg_keras.get_nn_config()

    layers = [nn_config['layers']]
    units = [nn_config['units']]

    repeat_experiment = 10
    collapse_matrix_2gates = []
    collapse_matrix_2gates_flat = []
    collapse_matrix_2gates_mg = []

    collapse_matrix_3gates = []
    collapse_matrix_3gates_flat = []
    collapse_matrix_3gates_mg = []

    collapse_matrix_4gates = []
    collapse_matrix_4gates_flat = []
    collapse_matrix_4gates_mg = []

    label_matrix = []

    np.random.seed(997)

    for case in cases:
        global_queue.push_actual_case(case, global_queue.cases_list)
        # push case onto the queue
        for order in orders:
            print("order two recurrent", order)
            global_queue.push_actual_order(order, global_queue.orders_list)
            #push order to queue
            for layer in layers:
                for unit in units:
                    print()
                    print()
                    print("Case: " + case + ", Fold: " + order +
                          ", Layers: " + str(layer) + ", Units: " + str(unit))
                    print()
                    run_network.run_network(subject,
                                            unit,
                                            layer,
                                            case,
                                            order,
                                            collapse_matrix_2gates,
                                            collapse_matrix_2gates_flat,
                                            collapse_matrix_2gates_mg,
                                            collapse_matrix_3gates,
                                            collapse_matrix_3gates_flat,
                                            collapse_matrix_3gates_mg,
                                            collapse_matrix_4gates,
                                            collapse_matrix_4gates_flat,
                                            collapse_matrix_4gates_mg,
                                            label_matrix,
                                            stateful=False)
                    #for i in range(1,repeat_experiment):
                    #   print("experiment: ",i)
                    #   run_network.run_network(unit,layer,case,order,stateful = False)
            #pop order from queue
            global_queue.pop_actual_order(global_queue.orders_list)
        global_queue.pop_actual_case(global_queue.cases_list)
        #pop case from queue
    return (collapse_matrix_2gates, collapse_matrix_2gates_flat,
            collapse_matrix_2gates_mg, collapse_matrix_3gates,
            collapse_matrix_3gates_flat, collapse_matrix_3gates_mg,
            collapse_matrix_4gates, collapse_matrix_4gates_flat,
            collapse_matrix_4gates_mg, label_matrix)