from sklearn import model_selection  # needed by get_data_split below


def get_data_all(path_data, array_max):
    # Dataset_csv is a project-local loader; its import is not shown in this excerpt.
    data_all = Dataset_csv(path_data=path_data, max_value=array_max)
    data_all.set_minibatch(data_all.total_inputs)
    X_data, y_data = data_all.generate_batch()

    return X_data, y_data, len(y_data)


def get_data_split(path_data, array_max, test_size=0.3):
    data_all = Dataset_csv(path_data=path_data, max_value=array_max)
    data_all.set_minibatch(data_all.total_inputs)
    data, label = data_all.generate_batch()

    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        data, label, test_size=test_size, random_state=42)
    return X_train, X_test, y_train, y_test, len(y_train), len(y_test)
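
# A minimal usage sketch for both helpers (hypothetical file name and a
# placeholder max-value array; Dataset_csv is project-local and assumed
# to be importable here):
#
#   X, y, n = get_data_all(path_data=['features_train.csv'], array_max=1)
#   X_tr, X_te, y_tr, y_te, n_tr, n_te = get_data_split(
#       path_data=['features_train.csv'], array_max=1, test_size=0.3)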
Example #3
def get_data_all(path_train, path_test, array_max):
    data_all = Dataset_csv(path_data=path_train, max_value=array_max)
    data_all.set_minibatch(data_all.total_inputs)
    X_train, y_train = data_all.generate_batch()

    data_all = Dataset_csv(path_data=path_test, max_value=array_max)
    data_all.set_minibatch(data_all.total_inputs)
    X_test, y_test = data_all.generate_batch()

    return X_train, X_test, y_train, y_test, len(y_train), len(y_test)
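
# A usage sketch for this train/test variant (hypothetical file names):
#
#   X_tr, X_te, y_tr, y_te, n_tr, n_te = get_data_all(
#       path_train=['features_train.csv'], path_test=['features_test.csv'],
#       array_max=1)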
path_load_weight0 = '../weight/tlconvolae_dual_' + OPC + '_1.npy'
path_save_weight0 = '../weight/tlconvolae_class0_' + OPC + '_1.npy'
path_load_weight1 = '../weight/tlconvolae_dual_' + OPC + '_1.npy'
path_save_weight1 = '../weight/tlconvolae_class1_' + OPC + '_1.npy'

if __name__ == '__main__':

    mini_batch_train = 34
    mini_batch_test = 30
    epoch = 10
    learning_rate = 0.0001

    # Maximum-value data
    data_normal = Dataset_csv(
        path_data=[path_data_train_dual[0], path_data_test_dual[0]],
        random=False)
    Damax = data_normal.amax
    del data_normal

    # utils.generate_max_csvData([path_data_train_dual[0], path_data_test_dual[0]], path+'maximo.csv', has_label=True)
    # Damax = utils.load_max_csvData(path+'maximo.csv')
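
    # Damax holds per-column maxima that are later passed as max_value to
    # scale features into [0, 1]. A rough sketch of the scaling Dataset_csv
    # presumably applies (an assumption; the class internals are not shown):
    #
    #   X_scaled = X / Damax   # element-wise division, column by column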

    # -------------------------------------------------------------------
    # TRAIN THE AUTOENCODER WITH BOTH CLASSES - GENERATE A BASE WEIGHT
    # -------------------------------------------------------------------
    print('ConvAE TRAIN DUAL')
    print('-----------------')

    data_train = Dataset_csv(path_data=path_data_train_dual,
                             minibatch=mini_batch_train,
                             max_value=Damax)  # closing argument inferred from the parallel loaders below
# ..................................................................

if __name__ == '__main__':

    path_load_weight = None
    path_save_weight = '../weight/saveAE_1.npy'

    mini_batch_train = 20
    mini_batch_test = 25
    epoch = 10
    learning_rate = 0.0001
    noise_level = 0

    # Mean and maximum-value data
    data_normal = Dataset_csv(
        path_data=[path_data_train[0], path_data_test[0]], random=False)
    Damax = data_normal.amax
    # utils.generate_max_csvData([path_data_train[0], path_data_test[0]], path+'maximo.csv', has_label=True)
    # Damax = utils.load_max_csvData(path+'maximo.csv')

    # Load data train
    data_train = Dataset_csv(path_data=path_data_train,
                             minibatch=mini_batch_train,
                             max_value=Damax)
    # Load data test
    data_test = Dataset_csv(path_data=path_data_test,
                            minibatch=mini_batch_test,
                            max_value=Damax,
                            random=False)
    # data_test = Dataset_csv(path_data=path_data_train, minibatch=mini_batch_train, max_value=Damax, random=False)
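
    # What typically follows in the sibling snippets of this collection
    # (a sketch; train_model/test_model are this project's helpers, and the
    # AEncoder graph would be built first, as in the 'AE TRAIN ALL' example):
    #
    #   print('Original Cost: ', test_model(AEncode, sess, data_test))
    #   train_model(AEncode, sess, data_train, objDatatest=data_test, epoch=epoch)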
Example #6
    # data_normal = Dataset_csv(path_data=[path_data_train_all[0], path_data_test_all[0]], random=False)
    # Damax = data_normal.amax
    # del data_normal

    # utils.generate_max_csvData([path_data_train_all[0], path_data_test_all[0]], path+'maximo.csv', has_label=True)
    Damax = utils.load_max_csvData(path + 'maximo.csv')

    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "1,2"  # expose only GPUs 1 and 2 to TensorFlow

    print('SEARCH SAMPLES')
    print('--------------')

    data = Dataset_csv(path_data=path_data_test_all,
                       minibatch=1,
                       max_value=Damax,
                       restrict=False,
                       random=False)

    with tf.device('/cpu:0'):
        with tf.Session(config=c) as sess:
            aencoder = AE.ae_multiClass(session=sess,
                                        npy_weight_paths=path_weight_ae,
                                        num_class=num_class)
            aencoder.build(dim_input=dim_input, layers=layers)
            for i in range(data.total_batchs_complete):
                x, label = data.generate_batch()

                res = aencoder.search_sample(sample=x)
                data.next_batch_test()
                print(res, label)
Example #7
    # data_normal = Dataset_csv(path_data=[path_data_train_all[0], path_data_test_all[0]], random=False)
    # Damax = data_normal.amax
    # del data_normal

    # utils.generate_max_csvData([path_data_train_all[0], path_data_test_all[0]], path+'maximo.csv', has_label=True)
    Damax = utils.load_max_csvData(path+'maximo.csv')

    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "1,2"
    # -------------------------------------------------------------------
    # TRAIN THE AUTOENCODER WITH BOTH CLASSES - GENERATE A BASE WEIGHT
    # -------------------------------------------------------------------
    print('AE TRAIN ALL')
    print('------------')

    data_train = Dataset_csv(path_data=path_data_train_all, minibatch=mini_batch_train, max_value=Damax)
    print('Load data train...')
    data_test = Dataset_csv(path_data=path_data_test_all, minibatch=mini_batch_test, max_value=Damax, random=False)
    print('Load data test...')

    with tf.Session(config=c) as sess:

        x_batch = tf.placeholder(tf.float32, [None, dim_input])

        AEncode = AE.AEncoder(path_load_weight_all, learning_rate=learning_rate_all)
        AEncode.build(x_batch, layers)
        sess.run(tf.global_variables_initializer())

        print('Original Cost: ', test_model(AEncode, sess, data_test))
        train_model(AEncode, sess, data_train, objDatatest=data_test, epoch=epoch_all)
Example #8
    # Write each row of X as one comma-separated CSV line.
    for i in range(lenght):
        f.write(','.join(map(str, X[i])) + '\n')
    f.close()
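
# An equivalent one-liner with NumPy (a sketch; assumes `import numpy as np`
# and that X is a 2-D array of numbers):
#
#   np.savetxt('features.csv', X, delimiter=',')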


if __name__ == '__main__':
    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "1,2"

    print('CNN + AE + LSH')
    print('--------------')

    # data_train = Dataset_csv(path_data=path_data_train_all, minibatch=30, max_value=1, restrict=False, random=True)
    data = Dataset_csv(path_data=path_data_test_all,
                       minibatch=30,
                       max_value=1,
                       restrict=False,
                       random=False)

    with tf.device('/cpu:0'):
        with tf.Session() as sess:
            calsh = CAL.cnn_ae_lsh(session=sess,
                                   npy_convol_path=path_w_cnn,
                                   npy_ae_path=path_w_ae_all,
                                   npy_ae_class_paths=path_w_ae_class,
                                   normal_max_path=path_normalization_max,
                                   num_class=num_class,
                                   k_classes=1)

            calsh.build(dim_input=dim_input, layers=layers)
Example #9
path_data_train = [path + 'SKINfeatures' + OPC + '_Train.csv']
path_data_test = [path + 'SKINfeatures' + OPC + '_Test.csv']
path_load_weight = None
path_save_weight = '../weight/tlmlp_' + OPC + '_1.npy'

if __name__ == '__main__':

    mini_batch_train = 20
    mini_batch_test = 30
    learning_rate = 0.0001
    epoch = 5
    num_class = 2

    # GENERATE DATA
    # Mean and maximum-value data
    data_normal = Dataset_csv(
        path_data=[path_data_train[0], path_data_test[0]], random=False)
    Damax = data_normal.amax
    # utils.generate_max_csvData([path_data_train[0], path_data_test[0]], path+'maximo.csv', has_label=True)
    # Damax = utils.load_max_csvData(path+'maximo.csv')

    # Load data train
    data_train = Dataset_csv(path_data=path_data_train,
                             minibatch=mini_batch_train,
                             max_value=Damax,
                             restrict=True)
    # Load data test
    data_test = Dataset_csv(path_data=path_data_test,
                            minibatch=mini_batch_test,
                            max_value=Damax,
                            random=False)
    accuracy = 0
Example #10
    # Damax = data_normal.amax
    # del data_normal

    # utils.generate_max_csvData([path_data_train_all[0], path_data_test_all[0]], path+'maximo.csv', has_label=True)
    # Damax = utils.load_max_csvData(path + 'maximo.csv')

    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "1,2"

    print('CNN + AE + LSH')
    print('--------------')

    # data_train = Dataset_csv(path_data=path_data_train_all, minibatch=30, max_value=1, restrict=False, random=True)
    data = Dataset_csv(path_data=path_data_test_all,
                       minibatch=30,
                       max_value=1,
                       restrict=False,
                       random=False)

    with tf.device('/cpu:0'):
        with tf.Session() as sess:
            calsh = CAL.cnn_ae_lsh(session=sess,
                                   npy_convol_path=path_w_cnn,
                                   npy_ae_path=path_w_ae_all,
                                   npy_ae_class_paths=path_w_ae_class,
                                   normal_max_path=path_normalization_max,
                                   num_class=num_class,
                                   k_classes=2,
                                   threshold=0.0002)

            calsh.build(dim_input=dim_input, layers=layers)
Example #11
if __name__ == '__main__':

    mini_batch_train = 35
    mini_batch_test = 5
    epoch = 50
    learning_rate = 0.00005
    l_hidden = 16
    ratio_diff = 0.05
    pca = True

    Damax = utils.load_max_csvData(path_maximo)
    # data_train = Dataset_csv(path_data=path_data_train, minibatch=mini_batch_train, max_value=Damax, restrict=False)
    data_test = Dataset_csv(path_data=path_data_test,
                            minibatch=mini_batch_test,
                            max_value=Damax,
                            restrict=False,
                            random=False)

    # Assumes numpy is imported above this excerpt
    # (import numpy as np; from numpy import genfromtxt).
    Xmatrix = genfromtxt(path_data_train[0], delimiter=',')
    shape = np.shape(Xmatrix)
    Xmatrix = Xmatrix[:, :shape[1] - 1]  # drop the trailing label column

    XFractal = getfractal(path, path_data_test[0].split('/')[-1], Xmatrix)
    print("fractal dimension of X:", XFractal)

    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "0"
    with tf.Session() as sess:

        x_batch = tf.placeholder(tf.float32, [None, dim_input])
Example #12
        if ep % 5 == 0:
            cost_tot, cost_prom = test_model(net, sess_train, objData)
            print('     Epoch', ep, ': ', cost_tot, ' / ', cost_prom)


if __name__ == '__main__':

    epoch = 21
    learning_rate = 0.00008

    for opc in range(3, 4):
        path_data_train_csv, path_data_test_csv, path_max_csv, name, dims, method, origDim = path_datasets(opc)
        Damax = utils.load_max_csvData(path_max_csv)

        data_train = Dataset_csv(path_data=path_data_train_csv, minibatch=35, max_value=Damax, random=False)
        # data_test = Dataset_csv(path_data=path_data_test_csv, minibatch=35, max_value=Damax, restrict=False, random=False)
        print('[', name, ']')

        for xdim in dims:
            print('     Dim:', xdim)

            pathFile = xpath + name + '/'

            with tf.Session() as sess:
                weight = xpath + name + '/' + 'weight-' + str(xdim) + '.npy'
                layers = [[int(origDim / 2), 'relu'], [xdim, 'relu']]

                x_batch = tf.placeholder(tf.float32, [None, origDim])
                ae = AE.AEncoder(weight, learning_rate=learning_rate)
                ae.build(x_batch, layers)
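
                # The steps that typically follow in the sibling AE snippets
                # above (a sketch; train_model is this project's helper):
                #
                #   sess.run(tf.global_variables_initializer())
                #   train_model(ae, sess, data_train, epoch=epoch)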