def load_images(self):
    """Build the training-set iterator used for visualization samples.

    :return: the iterator element produced by ``LoadData.get_data`` for
        ``self.train_files``; ``sess.run`` it to fetch (images, labels)
        batches.
    """
    # Single expression: construct the loader and hand back its iterator.
    return LoadData(self.batch_size, self.sess).get_data(self.train_files)
def Caculate(self, sess, files, accuracy_tensor, Cost_tensor,
             data_placeholder, labels_placeholder, rate_placeholder,
             model='train'):
    """Compute mean accuracy and mean loss over one pass of `files`.

    :param sess: active tf.Session holding the built graph.
    :param files (ndarray): file list to score.
    :param accuracy_tensor (tensor): accuracy op in the graph.
    :param Cost_tensor (tensor): cost op in the graph.
    :param data_placeholder (tensor): input-data placeholder.
    :param labels_placeholder (tensor): labels placeholder.
    :param rate_placeholder (tensor): dropout-rate placeholder (fed 0
        here, i.e. no dropout while scoring).
    :param model (string): 'train' or 'test'; selects which batch count
        (self.N_train / self.N_test) is shown in the progress line.
    :return acc_mean, loss_mean (float): per-batch means of accuracy
        and loss.
    """
    N = self.N_train if model == 'train' else self.N_test
    loader = LoadData(self.batch_size, sess)
    next_element_ = loader.get_data(files)
    acc, loss, batches = 0.0, 0.0, 0
    while True:
        try:
            images, labels = sess.run(next_element_)
            print('Score {}/{} \r'.format(batches + 1, N),
                  end='', flush=True)
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: one full pass completed.
            break
        acc_, loss_ = sess.run(
            [accuracy_tensor, Cost_tensor],
            feed_dict={
                data_placeholder: images,
                labels_placeholder: labels,
                rate_placeholder: 0
            })
        acc += acc_
        loss += loss_
        batches += 1
    # BUG FIX: the original kept `count` starting at 1 and incremented it
    # once per batch, so after n batches it equaled n + 1 and the means
    # were computed as sum / (n + 1), underestimating both metrics.
    # Divide by the true batch count; guard against an empty dataset.
    denom = max(batches, 1)
    acc_mean = acc / denom
    loss_mean = loss / denom
    return acc_mean, loss_mean
def fit(self, lr, epochs, drop_rate, stop_acc=0.980,
        save_path='model/alexNet'):
    """Build the graph, train the model, and save a checkpoint.

    :param lr (float): learning rate for RMSProp.
    :param epochs (int): maximum number of epochs to run.
    :param drop_rate (float): dropout rate; rate = 1 - keep_prob.
    :param stop_acc (float): early-stop once train accuracy reaches this
        value (default 0.980, the previously hard-coded threshold).
    :param save_path (str): checkpoint path for tf.train.Saver
        (default 'model/alexNet', the previously hard-coded path).
    :return: None. The trained model is saved to `save_path`.
    """
    # Placeholders: 227x227x3 images (AlexNet input size), binary labels.
    data = tf.placeholder(tf.float32, [None, 227, 227, 3], name='Input')
    labels = tf.placeholder(tf.float32, [None, 1], name='Labels')
    rate = tf.placeholder(tf.float32, name='rate')

    # Build model: forward pass + sigmoid cross-entropy loss + RMSProp.
    params = self.init_parameters()
    out = self.forward(data, params, rate)
    Cost = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=labels))
    optimizer = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(Cost)

    # Scoring ops: sigmoid + round yields a hard 0/1 prediction.
    predict = tf.round(tf.sigmoid(out))
    equal = tf.equal(labels, predict)
    correct = tf.cast(equal, tf.float32)
    accuracy = tf.reduce_mean(correct)

    # Split data into train / test file lists.
    split_data = SplitData(self.file_dir, Load_samples=self.Load_samples,
                           test_rate=self.test_rate)
    train_files, test_files = split_data()
    self.N_train = len(train_files) // self.batch_size
    self.N_test = len(test_files) // self.batch_size

    # Saver; export the output tensor so inference code can retrieve it
    # from the 'pre_network' collection after restoring the checkpoint.
    saver = tf.train.Saver()
    tf.add_to_collection('pre_network', out)
    init = tf.global_variables_initializer()

    # Training...
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(epochs):
            # The iterator is exhausted after one pass, so a fresh
            # loader/iterator is created every epoch.
            loader = LoadData(self.batch_size, sess)
            next_element_train = loader.get_data(train_files)
            count = 1
            while True:
                try:
                    images, target = sess.run(next_element_train)
                    print('Training {}/{} \r'.format(count, self.N_train),
                          end='', flush=True)
                    count += 1
                except tf.errors.OutOfRangeError:
                    # End of the training set for this epoch.
                    break
                else:
                    _ = sess.run(optimizer,
                                 feed_dict={
                                     data: images,
                                     labels: target,
                                     rate: drop_rate
                                 })
            # Score both splits after each epoch.
            acc_train, loss_train = self.Caculate(
                sess, train_files, accuracy, Cost, data, labels, rate,
                'train')
            acc_test, loss_test = self.Caculate(
                sess, test_files, accuracy, Cost, data, labels, rate,
                'test')
            print('[{}/{}] train loss:{:.4f} - train acc:{:.4f} - '
                  'test loss:{:.4f} - test acc:{:.4f}'.format(
                      epoch + 1, epochs, loss_train, acc_train,
                      loss_test, acc_test))
            # Early stop once the train accuracy target is reached.
            if acc_train >= stop_acc:
                break
        # Save the final checkpoint (inside the session context).
        saver.save(sess, save_path)