Example #1
    def __init__(self,
                 session,
                 npy_convol_path=None,
                 npy_ae_path=None,
                 npy_ae_class_paths=None,
                 normal_max_path=None,
                 trainable=False,
                 num_class=0,
                 k_classes=1,
                 threshold=0):
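        """Parameter sketch (inferred from usage in this snippet, not from
        project documentation):

        session            -- active tf.Session the model runs in
        npy_convol_path    -- .npy file with pretrained convolutional weights
        npy_ae_path        -- .npy file with autoencoder weights
        npy_ae_class_paths -- list of per-class autoencoder weight files
        normal_max_path    -- CSV of per-feature maxima used for normalization
        trainable          -- whether loaded variables remain trainable
        num_class          -- number of classes (one autoencoder per class)
        k_classes          -- number of top classes to consider
        threshold          -- decision threshold
        """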

        if npy_convol_path is not None:
            # allow_pickle=True is required to load dict-type .npy files on NumPy >= 1.16.3
            self.data_dict = np.load(npy_convol_path, encoding='latin1',
                                     allow_pickle=True).item()
            print("npy file loaded")
        else:
            self.data_dict = None
            print("random weight")

        if normal_max_path is not None:
            self.normalization_max = utils.load_max_csvData(normal_max_path)
        else:
            self.normalization_max = 1
            print("Data no normalization")

        self.var_dict = {}
        self.trainable = trainable
        self.weight_ae_path = npy_ae_path
        self.weight_ae_class_paths = npy_ae_class_paths
        self.num_class = num_class
        self.AEclass = []
        self.sess = session

        self.k = k_classes
        self.threshold = threshold
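
# Usage sketch for the constructor above (assumptions: the enclosing class
# name `VGGAEModel` and all file paths are illustrative, not from the source):
#
#     import tensorflow as tf
#     sess = tf.Session()
#     model = VGGAEModel(session=sess,
#                        npy_convol_path='weights/vgg_convol.npy',
#                        npy_ae_path='weights/vggAE.npy',
#                        normal_max_path='data/maximo.csv',
#                        num_class=5)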
Example #2
for i in range(num_class):
    path_weight_ae.append(path_weight + 'vggAE_class' + str(i) + '.npy')

# Note: the assert message must be an expression; `assert cond, print(msg)`
# passes None as the message and prints even when the check succeeds.
assert os.path.exists(path), 'Data directory does not exist: ' + path
assert os.path.exists(path_weight), ('Weights directory does not exist: ' +
                                     path_weight)

if __name__ == '__main__':

    # Maximum-value data
    # data_normal = Dataset_csv(path_data=[path_data_train_all[0], path_data_test_all[0]], random=False)
    # Damax = data_normal.amax
    # del data_normal

    # utils.generate_max_csvData([path_data_train_all[0], path_data_test_all[0]], path+'maximo.csv', has_label=True)
    Damax = utils.load_max_csvData(path + 'maximo.csv')

    c = tf.ConfigProto()
    c.gpu_options.visible_device_list = "1,2"
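
    # Optional (sketch): besides pinning visible devices, TF 1.x can grow GPU
    # memory on demand via the standard ConfigProto option:
    #     c.gpu_options.allow_growth = True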

    print('SEARCH SAMPLES')
    print('--------------')

    data = Dataset_csv(path_data=path_data_test_all,
                       minibatch=1,
                       max_value=Damax,
                       restrict=False,
                       random=False)
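    # Dataset_csv is project-specific; from its usage here, path_data takes a
    # list of CSV files, max_value supplies the per-feature maxima used for
    # normalization, and random=False preserves row order (inferred, not a
    # documented API).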

    with tf.device('/cpu:0'):
        with tf.Session(config=c) as sess:
            # The body of this session block was cut off in the original
            # snippet; `pass` keeps the fragment syntactically valid.
            pass
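
# Reconstruction sketch: the snippet's stray `data_all.generate_batch()` call
# and `return` line evidently belong to the `get_data_all` helper invoked as
# "Method 2" in the __main__ block below; the body here is inferred from the
# Dataset_csv usage above and is an assumption, not the original code.
def get_data_all(path_train_csv, path_test_csv, max_value):
    data_all = Dataset_csv(path_data=[path_train_csv], minibatch=1,
                           max_value=max_value, restrict=False, random=False)
    X_train, y_train = data_all.generate_batch()
    data_all = Dataset_csv(path_data=[path_test_csv], minibatch=1,
                           max_value=max_value, restrict=False, random=False)
    X_test, y_test = data_all.generate_batch()
    return X_train, X_test, y_train, y_test, len(y_train), len(y_test)


# Sketch of the "Method 1" helper referenced (commented out) below. It is not
# defined in this snippet; this version assumes a plain sklearn train/test
# split over a single CSV, matching the call get_data_split(path, Damax, 0.3).
from sklearn.model_selection import train_test_split


def get_data_split(path_csv, max_value, test_size):
    data_all = Dataset_csv(path_data=[path_csv], minibatch=1,
                           max_value=max_value, restrict=False, random=False)
    X, y = data_all.generate_batch()
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=0)
    return X_train, X_test, y_train, y_test, len(y_train), len(y_test)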


if __name__ == '__main__':

    path_logs = xpath + 'resultClasify2.csv'
    f = open(path_logs, 'a')

    for i in range(0, 3):
        path_data_train_csv, path_data_test_csv, path_max_csv, name = path_datasets(
            i)

        print('\n[NAME:', name, ']')
        Damax = utils.load_max_csvData(path_max_csv)

        # Method 1
        # X_train, X_test, y_train, y_test, total_train, total_test = get_data_split(path_data_test_csv, Damax, 0.3)
        # Method 2
        X_train, X_test, y_train, y_test, total_train, total_test = get_data_all(
            path_data_train_csv, path_data_test_csv, Damax)
        print(np.shape(X_train), np.shape(X_test))

        knn = neighbors.KNeighborsClassifier()
        print("     Train model...")
        knn.fit(X_train, y_train)
        print("     Test model...")
        Z = knn.predict(X_test)

        acc = utils.metrics_multiclass(y_test, Z)
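        # Cross-check sketch (assumption: utils.metrics_multiclass returns a
        # scalar accuracy). The same view with sklearn's standard metrics:
        #     from sklearn.metrics import accuracy_score, classification_report
        #     print('accuracy:', accuracy_score(y_test, Z))
        #     print(classification_report(y_test, Z))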