def evaluate_once(self):
    session_manager = SessionManager(self.config)
    global_step, sess = session_manager.restore(self.saver)

    coord = tf.train.Coordinator()
    try:
        # Start every registered queue runner so the input pipeline feeds the graph.
        queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
        threads_per_qr = [qr.create_threads(sess, coord=coord, daemon=True, start=True)
                          for qr in queue_runners]
        threads = [t for qr_threads in threads_per_qr for t in qr_threads]

        num_iter = math.ceil(self.config.dataset.set_sizes['test']
                             / self.config.training_params.batch_size)
        step = 0
        full_logits = []
        full_labels = []
        while step < num_iter and not coord.should_stop():
            logits, labels = sess.run([self.logits, self.labels])
            full_logits.append(logits)
            full_labels.append(labels)
            step += 1

        logits = np.vstack(full_logits)
        labels = np.array(np.vstack(full_labels), np.uint8).reshape(-1)
        # For each example, pick the score the network assigned to its true
        # class; the mean of these is the reported classification rate.
        good_logits = logits[np.arange(logits.shape[0]), labels]
        classification_rate = good_logits.sum() / logits.shape[0]
        print('Classification rate:', classification_rate)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
    except Exception as e:
        coord.request_stop(e)
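
# A minimal standalone sketch of the metric computed above, assuming the
# network emits per-class softmax probabilities: NumPy fancy indexing picks
# each row's score at its true label, and the mean of those scores is the
# classification rate printed by evaluate_once. `_demo_classification_rate`
# is illustrative only and not part of the pipeline.
def _demo_classification_rate():
    import numpy as np
    probs = np.array([[0.7, 0.2, 0.1],   # true class 0, scored 0.7
                      [0.1, 0.8, 0.1],   # true class 1, scored 0.8
                      [0.3, 0.3, 0.4]])  # true class 0, scored 0.3
    labels = np.array([0, 1, 0], np.uint8)
    # probs[i, labels[i]] for every row i, vectorized:
    good = probs[np.arange(probs.shape[0]), labels]  # [0.7, 0.8, 0.3]
    return good.sum() / probs.shape[0]               # 0.6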

def run(self):
    files, labels, images = self.input_manager.get_inputs(type='submission',
                                                          distorted=False,
                                                          shuffle=False)

    with tf.variable_scope("inference"):
        logits = self.config.inference(images, testing=True)

    # Restore the moving-average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        self.config.training_params.moving_average_decay)
    variables_to_restore = variable_averages.variables_to_restore()
    self.saver = tf.train.Saver(variables_to_restore)

    session_manager = SessionManager(self.config)
    global_step, sess = session_manager.restore(self.saver)

    coord = tf.train.Coordinator()
    try:
        # Start every registered queue runner so the input pipeline feeds the graph.
        queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
        threads_per_qr = [qr.create_threads(sess, coord=coord, daemon=True, start=True)
                          for qr in queue_runners]
        threads = [t for qr_threads in threads_per_qr for t in qr_threads]

        num_iter = math.ceil(self.config.dataset.submission_size
                             / self.config.training_params.batch_size)
        step = 0
        predictions_ids = []
        predictions = []
        while step < num_iter and not coord.should_stop():
            submission_files, submission_logits = sess.run([files, logits])
            submission_files = list(map(self.config.dataset.retrieve_file_id,
                                        submission_files))
            predictions.append(submission_logits)
            predictions_ids += submission_files
            step += 1

        predictions = np.float32(np.vstack(predictions))
        # Smooth every score away from zero, then renormalize each row so
        # the class scores sum to one again.
        predictions += 5e-2
        row_sums = np.reshape(predictions.sum(axis=1), [-1, 1])
        predictions /= row_sums

        # Assemble the submission file: one row per image, columns c0..c9.
        df = pd.DataFrame(data=predictions)
        cols = ['c' + str(c) for c in range(10)]
        df.columns = cols
        df['img'] = predictions_ids
        df = df[['img'] + cols]
        # The final batch wraps around the input queue, so the same image can
        # appear twice; keep only its first prediction.
        df = df.drop_duplicates(subset=['img'], keep='first')
        df.to_csv('submission.csv', index=False)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
    except Exception as e:
        coord.request_stop(e)
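
# A minimal standalone sketch of the post-processing step above, assuming the
# raw outputs are non-negative class scores: adding a small constant before
# renormalizing keeps every probability away from zero, which caps the
# per-example penalty under a log-loss style metric (log(0) is unbounded).
# `_demo_smooth_and_normalize` is illustrative only and not part of the pipeline.
def _demo_smooth_and_normalize():
    import numpy as np
    preds = np.float32([[1.0, 0.0, 0.0],   # an overconfident row
                        [0.2, 0.5, 0.3]])
    preds += 5e-2                               # floor every score at 0.05
    preds /= preds.sum(axis=1, keepdims=True)   # each row sums to one again
    return preds  # no class probability is exactly zero anymore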