def build(self, dim_input, layers=None):
    """Build the 4-class MLP head plus the global and per-class autoencoders."""
    start_time = time.time()
    print("build model started")
    self.x_batch = tf.placeholder(tf.float32, [None, dim_input])

    # ----------------
    # NET VGG ONLY MLP
    self.fc1 = self.fc_layer(self.x_batch, 8704, 2048, "fc1")
    self.relu1 = tf.nn.relu(self.fc1)
    # self.relu1 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
    self.fc2 = self.fc_layer_sigm(self.relu1, 2048, 4, "out")
    self.probVGG = self.fc2

    # ------------------
    # AUTOENCODER GLOBAL
    self.AEGlobal = AE.AEncoder(self.weight_ae_path)
    self.AEGlobal.build(self.x_batch, layers)

    # ---------------------
    # AUTOENCODERS BY CLASS
    for i in range(self.num_class):
        self.AEclass.append(AE.AEncoder(self.weight_ae_class_paths[i]))
        self.AEclass[i].build(self.x_batch, layers)

    self.sess.run(tf.global_variables_initializer())
    print("build model finished: %ds" % (time.time() - start_time))
def build(self, dim_input, layers=None):
    """Build the binary softmax MLP head plus the global and per-class autoencoders."""
    start_time = time.time()
    print("build model started")
    self.x_batch = tf.placeholder(tf.float32, [None, dim_input])

    # ----------------
    # NET VGG ONLY MLP
    self.fc7 = self.fc_layer(self.x_batch, 4096, 1536, "fc7")
    self.relu7 = tf.nn.relu(self.fc7)
    self.fc8 = self.fc_layer(self.relu7, 1536, 2, "fc8")
    self.probVGG = tf.nn.softmax(self.fc8, name="prob")

    # ------------------
    # AUTOENCODER GLOBAL
    self.AEGlobal = AE.AEncoder(self.weight_ae_path)
    self.AEGlobal.build(self.x_batch, layers)

    # ---------------------
    # AUTOENCODERS BY CLASS
    for i in range(self.num_class):
        self.AEclass.append(AE.AEncoder(self.weight_ae_class_paths[i]))
        self.AEclass[i].build(self.x_batch, layers)

    self.sess.run(tf.global_variables_initializer())
    print("build model finished: %ds" % (time.time() - start_time))
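# NOTE: fc_layer / fc_layer_sigm are referenced by the two build() methods
# above but are not part of this excerpt. The following is a minimal sketch
# of what they plausibly do, assuming TensorFlow 1.x; the variable names,
# initializers, and free-function form (the originals are methods) are
# assumptions for illustration, not the original implementation.
import tensorflow as tf

def fc_layer(bottom, in_size, out_size, name):
    # Affine layer: y = xW + b, with variables scoped under `name`.
    with tf.variable_scope(name):
        weights = tf.get_variable("weights", [in_size, out_size],
                                  initializer=tf.truncated_normal_initializer(stddev=0.01))
        biases = tf.get_variable("biases", [out_size],
                                 initializer=tf.zeros_initializer())
        return tf.nn.bias_add(tf.matmul(bottom, weights), biases)

def fc_layer_sigm(bottom, in_size, out_size, name):
    # Same affine layer followed by a sigmoid, as used for the "out" layer.
    return tf.nn.sigmoid(fc_layer(bottom, in_size, out_size, name))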
def build(self, dim_input, layers):
    """Build only the per-class autoencoders (no MLP head)."""
    self.x_batch = tf.placeholder(tf.float32, [None, dim_input])
    for i in range(self.num_class):
        self.AEclass.append(AE.AEncoder(self.weight_paths[i]))
        self.AEclass[i].build(self.x_batch, layers)
    self.sess.run(tf.global_variables_initializer())
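# NOTE: a typical use of per-class autoencoders like self.AEclass is to
# score a batch by its reconstruction cost under each class model and pick
# the cheapest one. This usage sketch is an assumption about intent, not
# code from the excerpt; `cost` as the AEncoder's reconstruction-cost
# tensor is likewise an assumed attribute name.
import numpy as np

def classify_by_reconstruction(self, batch):
    # One reconstruction cost per class-specific autoencoder.
    costs = [self.sess.run(ae.cost, feed_dict={self.x_batch: batch})
             for ae in self.AEclass]
    # The class whose autoencoder reconstructs the batch best.
    return int(np.argmin(costs))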
def reduce_using_autoencoders(new_dim):
    """Train a one-layer autoencoder and return the reduced feature matrix.

    Relies on module-level globals (path_load_weight, learning_rate,
    x_batch, sess, data_test, epoch) defined elsewhere in the script.
    """
    layers = [[new_dim, 'relu']]
    AEncode = AE.AEncoder(path_load_weight, learning_rate=learning_rate)
    AEncode.build(x_batch, layers)
    sess.run(tf.global_variables_initializer())
    train_model(AEncode, sess, data_test, objDatatest=data_test, epoch=epoch)
    _, _, matrix = test_model(AEncode, sess, data_test, new_dim)
    print(np.shape(matrix))
    return matrix
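# NOTE: train_model / test_model are called throughout these scripts but
# are not included in the excerpt. Below is a minimal sketch of the loop
# they imply, assuming AEncoder exposes a `cost` tensor, a `train` op, and
# an `x_batch` input attribute, and that Dataset_csv yields minibatches via
# total_batchs / generate_batch() -- all of these attribute and method
# names are assumptions for illustration. The 4-argument variant used in
# reduce_using_autoencoders additionally returns the encoded matrix; this
# sketch models the simpler cost-only version used in the later scripts.
def train_model(model, sess, data, objDatatest=None, epoch=10):
    for e in range(epoch):
        for _ in range(data.total_batchs):            # assumed attribute
            batch, _ = data.generate_batch()          # assumed signature
            sess.run(model.train, feed_dict={model.x_batch: batch})
        if objDatatest is not None:
            print('Epoch', e, 'test cost:', test_model(model, sess, objDatatest))

def test_model(model, sess, data):
    # Mean reconstruction cost over the whole dataset.
    total = 0.0
    for _ in range(data.total_batchs):
        batch, _ = data.generate_batch()
        total += sess.run(model.cost, feed_dict={model.x_batch: batch})
    return total / data.total_batchs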
# Load data train
data_train = Dataset_csv(path_data=path_data_train, minibatch=mini_batch_train,
                         max_value=Damax)
# Load data test
data_test = Dataset_csv(path_data=path_data_test, minibatch=mini_batch_test,
                        max_value=Damax, random=False)
# data_test = Dataset_csv(path_data=path_data_train, minibatch=mini_batch_train, max_value=Damax, random=False)

with tf.Session() as sess:
    x_batch = tf.placeholder(tf.float32, [None, 4096])
    mask = tf.placeholder(tf.float32, [None, 4096])
    noise_mode = tf.placeholder(tf.bool)

    AEncode = AE.AEncoder(path_load_weight, learning_rate=learning_rate, noise=noise_level)
    AEncode.build(x_batch, mask, noise_mode, [2048, 1024])
    sess.run(tf.global_variables_initializer())

    print('Original Cost: ', test_model(AEncode, sess, data_test))
    train_model(AEncode, sess, data_train, objDatatest=data_test, epoch=epoch)

    # SAVE WEIGHTS
    AEncode.save_npy(sess, path_save_weight)

    # Plot example reconstructions
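# NOTE: the mask / noise_mode placeholders above point to a denoising
# autoencoder: during training a binary mask corrupts a fraction
# (noise_level) of each input, while at test time the inputs pass through
# intact. A sketch of how such a feed_dict could be assembled; the
# keep-probability wiring is an assumption about AEncoder's internals.
import numpy as np

def make_feed(batch, noise_level, training):
    # Bernoulli mask: 1 keeps a feature, 0 zeroes it out (corruption).
    if training:
        m = np.random.binomial(1, 1.0 - noise_level, size=batch.shape).astype(np.float32)
    else:
        m = np.ones_like(batch, dtype=np.float32)
    return {x_batch: batch, mask: m, noise_mode: training}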
# -------------------------------------------------------------------
# TRAIN THE AUTOENCODER ON BOTH CLASSES - GENERATES A BASE WEIGHT SET
# -------------------------------------------------------------------
print('AE TRAIN ALL')
print('------------')

data_train = Dataset_csv(path_data=path_data_train_all, minibatch=mini_batch_train,
                         max_value=Damax)
print('Load data train...')
data_test = Dataset_csv(path_data=path_data_test_all, minibatch=mini_batch_test,
                        max_value=Damax, random=False)
print('Load data test...')

with tf.Session(config=c) as sess:
    x_batch = tf.placeholder(tf.float32, [None, dim_input])

    AEncode = AE.AEncoder(path_load_weight_all, learning_rate=learning_rate_all)
    AEncode.build(x_batch, layers)
    sess.run(tf.global_variables_initializer())

    print('Original Cost: ', test_model(AEncode, sess, data_test))
    train_model(AEncode, sess, data_train, objDatatest=data_test, epoch=epoch_all)

    # SAVE WEIGHTS
    AEncode.save_npy(sess, path_save_weight_all)

del AEncode
del data_train
del data_test

# -------------------------------------------------------------------
# TRAIN THE AUTOENCODER ON CLASS 0 - BENIGN
data_train = Dataset_csv(path_data=path_data_train_csv, minibatch=35, max_value=Damax,
                         random=False)
# data_test = Dataset_csv(path_data=path_data_test_csv, minibatch=35, max_value=Damax, restrict=False, random=False)

print('[', name, ']')
for xdim in dims:
    print(' Dim:', xdim)
    pathFile = xpath + name + '/'

    # NOTE: each pass adds a fresh autoencoder to the default graph;
    # calling tf.reset_default_graph() here would keep it from growing.
    with tf.Session() as sess:
        weight = xpath + name + '/' + 'weight-' + str(xdim) + '.npy'
        layers = [[int(origDim / 2), 'relu'], [xdim, 'relu']]

        x_batch = tf.placeholder(tf.float32, [None, origDim])
        ae = AE.AEncoder(weight, learning_rate=learning_rate)
        ae.build(x_batch, layers)
        sess.run(tf.global_variables_initializer())

        # TRAIN AENCODER
        train_model(ae, sess, data_train, epoch=epoch)
        ae.save_npy(sess, weight)  # SAVE AENCODER

        # filenameTest = name.lower() + '-test-ae2-' + str(xdim)
        # filenameTrain = name.lower() + '-train-ae2-' + str(xdim)
        # cost_tot, cost_prom = test_model_save(ae, sess, data_train, pathFile, filenameTrain)
        # print(' TRAIN: Dim', xdim, ': ', cost_tot, ' / ', cost_prom)
        # cost_tot, cost_prom = test_model_save(ae, sess, data_test, pathFile, filenameTest)
        # print(' TEST : Dim', xdim, ': ', cost_tot, ' / ', cost_prom)
        # utils.normalization_complete([pathFile + 'output_' + filenameTest + '.csv',
        #                               pathFile + 'output_' + filenameTrain + '.csv'])
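# NOTE: the scripts above suggest AE.AEncoder restores weights from the
# .npy path passed to its constructor (running the initializer after
# build keeps the loaded values, mirroring the usual VGG-style npy
# loading). Under that assumption, a reducer trained and saved in the
# loop can be rebuilt later like this, reusing xdim / origDim /
# learning_rate from above:
with tf.Session() as sess:
    x_batch = tf.placeholder(tf.float32, [None, origDim])
    ae = AE.AEncoder(xpath + name + '/' + 'weight-' + str(xdim) + '.npy',
                     learning_rate=learning_rate)
    ae.build(x_batch, [[int(origDim / 2), 'relu'], [xdim, 'relu']])
    sess.run(tf.global_variables_initializer())
    # ae now encodes new batches without retraining.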