import math
import os
import time

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import cifarnet  # assumes the TF-Slim models repo (tensorflow/models) is on PYTHONPATH


def build_net(image):
    image = tf.reshape(image, [-1, 32, 32, 3])
    with tf.variable_scope(name_or_scope='CifarNet', reuse=tf.AUTO_REUSE):
        arg_scope = cifarnet.cifarnet_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = cifarnet.cifarnet(image)
            probs = tf.nn.softmax(logits)  # class probabilities
    return logits, probs, end_points
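A minimal usage sketch for the builder above; the dummy batch and the direct session run are illustrative assumptions, not part of the original example:

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
logits, probs, end_points = build_net(images)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy_batch = np.random.rand(4, 32, 32, 3).astype(np.float32)  # random stand-in images
    print(sess.run(probs, feed_dict={images: dummy_batch}).shape)  # (4, 10) with CifarNet's default 10 classes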
Example 2
def _test(sess):
    image_placeholder = tf.placeholder(tf.float32,
                                       [FLAGS.batch_size, 32, 32, 3])
    label_placeholder = tf.placeholder(tf.float32,
                                       [FLAGS.batch_size, FLAGS.class_number])
    with slim.arg_scope(cifarnet.cifarnet_arg_scope()):
        logits, endpoints = cifarnet.cifarnet(image_placeholder)
    # in_top_k expects integer class indices; the labels here are one-hot,
    # so take the argmax before comparing against the top-k logits
    top_k_op = tf.nn.in_top_k(logits,
                              tf.argmax(label_placeholder, axis=1),
                              FLAGS.top_k)
    variable_to_restore = slim.get_model_variables()
    restorer = tf.train.Saver(variable_to_restore)
    checkpoint = tf.train.latest_checkpoint(FLAGS.logdir)
    restorer.restore(sess, checkpoint)
    print('---Successfully restored from %s---' % FLAGS.logdir)
    dataset = DatasetCifar(FLAGS.dataset_dir,
                           one_hot=True,
                           class_number=FLAGS.class_number)
    batch_count = 0
    true_count = 0
    while True:
        try:
            test_images, test_labels = dataset.next_test_batch()
            test_images = np.array(test_images)
            test_labels = np.array(test_labels)
            predictions = sess.run(top_k_op,
                                   feed_dict={
                                        image_placeholder: test_images,
                                       label_placeholder: test_labels
                                   })
            true_count += np.sum(predictions)
            batch_count += 1
        except tf.errors.OutOfRangeError:
            precision = true_count / (batch_count * FLAGS.batch_size)
            print('-----------------------------')
            print('| Precision is %f |' % precision)
            print('-----------------------------')
            break
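A hedged driver for _test; the original receives sess from its caller, so this entry point is an assumption for illustration:

def main(_):
    # FLAGS and DatasetCifar are assumed to be defined elsewhere in the
    # project, as in the snippet above
    with tf.Session() as sess:
        _test(sess)

if __name__ == '__main__':
    tf.app.run()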
Example 3
def _train(sess):
    with tf.name_scope('data_input'):
        image_batch, label_batch = setup_train_input(sess)

    with tf.name_scope('Network'):
        with slim.arg_scope(cifarnet.cifarnet_arg_scope()):
            logits, end_points = cifarnet.cifarnet(image_batch)
    with tf.name_scope('X_entropy_loss'):
        # tf.losses with keyword arguments avoids the (logits, labels)
        # argument-order ambiguity of the deprecated slim.losses alias
        x_entropy_loss = tf.losses.softmax_cross_entropy(
            onehot_labels=label_batch,
            logits=logits,
            label_smoothing=FLAGS.label_smoothing,
            weights=1.0)
    with tf.name_scope('total_loss'):
        total_loss = tf.losses.get_total_loss()
    with tf.name_scope('global_step'):
        global_step = tf.train.get_or_create_global_step()
        incr_global_step = tf.assign(global_step, global_step + 1)
    with tf.name_scope('train'):
        learning_rate = _configure_learning_rate(
            50000, global_step)  # TODO: replace the hard-coded train-set size with a flag
        optimizer = _configure_optimizer(learning_rate)
        var_to_train = tf.trainable_variables()
        gradients = optimizer.compute_gradients(total_loss,
                                                var_list=var_to_train)
        train_op = optimizer.apply_gradients(gradients)

    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    saver = tf.train.Saver(max_to_keep=5)

    with tf.name_scope('Summary'):
        # Add summaries for end_points.
        for end_point in end_points:
            x = end_points[end_point]
            tf.summary.histogram('activations/' + end_point, x)
            tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x))
        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            tf.summary.scalar('losses/%s' % loss.op.name, loss)
        tf.summary.scalar('total_loss', total_loss)
        # Add summaries for variables.
        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)
        tf.summary.scalar('learning_rate', learning_rate)
        summary_writer = tf.summary.FileWriter(FLAGS.logdir, graph=sess.graph)
        summary_op = tf.summary.merge_all()
    step = 0
    max_step = math.ceil(50000 * FLAGS.num_epoch * 10 / FLAGS.batch_size)
    start_time = time.time()
    print('Start training...')
    print('Batch size: %d, number of epoch: %d' %
          (FLAGS.batch_size, FLAGS.num_epoch))
    while True:
        try:
            options = None
            run_metadata = None
            if should_log(FLAGS.trace_freq, step, max_step):
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
            fetches = {
                'train': train_op,
                'global_step': global_step,
                'incr_global_step': incr_global_step
            }
            if should_log(FLAGS.log_freq, step, max_step):
                fetches['x_entropy_loss'] = x_entropy_loss
                fetches['total_loss'] = total_loss
            if should_log(FLAGS.sum_freq, step, max_step):
                fetches['summary'] = summary_op
            results = sess.run(fetches,
                               options=options,
                               run_metadata=run_metadata)
            current_time = time.time()
            process_time = current_time - start_time
            remain_time = (max_step - step + 1) * process_time / (step + 1)
            if should_log(FLAGS.log_freq, step, max_step):
                print(
                    '-------------------------------------------------------------------------------'
                )
                print(
                    'Global step: %d, X_loss: %.4f, total loss: %.4f, process time: %d mins, remain time: %d mins'
                    % (results['global_step'], results['x_entropy_loss'],
                       results['total_loss'], process_time / 60,
                       remain_time / 60))
            if should_log(FLAGS.sum_freq, step, max_step):
                summary_writer.add_summary(results['summary'],
                                           results['global_step'])
            if should_log(FLAGS.trace_freq, step, max_step):
                print('Recording trace...')
                summary_writer.add_run_metadata(
                    run_metadata, 'step_%d' % results['global_step'])
            if should_log(FLAGS.save_freq, step, max_step):
                print('Saving model...')
                saver.save(sess,
                           os.path.join(FLAGS.logdir, 'model'),
                           global_step=results['global_step'])
            step = step + 1
        except tf.errors.OutOfRangeError:
            print(
                '----------------------------------------------------------------------------------'
            )
            print('Done training!')
            current_time = time.time()
            process_time = current_time - start_time
            print('Total training time is %d hours and %d minutes' %
                  (process_time // 3600, (process_time % 3600) // 60))
            break
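The training routine reads many flags (logdir, batch_size, num_epoch, the *_freq intervals, label_smoothing). A hedged sketch of how they might be declared with tf.app.flags; the names match the snippet, but the default values are illustrative assumptions:

# Illustrative flag declarations; the defaults are assumptions.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('logdir', '/tmp/cifarnet', 'checkpoint/summary directory')
tf.app.flags.DEFINE_integer('batch_size', 128, 'mini-batch size')
tf.app.flags.DEFINE_integer('num_epoch', 10, 'number of training epochs')
tf.app.flags.DEFINE_integer('log_freq', 100, 'steps between console logs')
tf.app.flags.DEFINE_integer('sum_freq', 100, 'steps between summary writes')
tf.app.flags.DEFINE_integer('save_freq', 1000, 'steps between checkpoints')
tf.app.flags.DEFINE_integer('trace_freq', 0, 'steps between runtime traces')
tf.app.flags.DEFINE_float('label_smoothing', 0.0, 'label smoothing factor')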
X_valid, X_train = X_train[:50], X_train[50:300]  # assumed split, mirroring the y split below
y_valid, y_train = y_train[:50], y_train[50:300]
X_test = X_test[:50]
y_test = y_test[:50]

def reset_graph(seed=42):
    # assumed helper (common in TF1 notebooks): clear the default graph
    # and fix the seeds for reproducibility
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

reset_graph()
X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="X")
y = tf.placeholder(tf.int32, shape=[None], name="y")
step = tf.placeholder(tf.float32, name='step')
n_epochs = 50
batch_size = 20
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty

with tf.name_scope("resnet"):
    with slim.arg_scope(cifarnet.cifarnet_arg_scope()):
        logits, end_points = cifarnet.cifarnet(X)
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=y)
    loss = tf.reduce_mean(xentropy)
with tf.name_scope("train"):
    change_lr = lr_step(step)
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=change_lr, momentum=0.9)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
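The early-stopping bookkeeping above (best_loss, checks_without_progress, max_checks_without_progress) is declared but the loop that uses it is missing. Below is a hedged sketch of a typical execution phase, assuming X_train/X_valid splits parallel to the y splits above; the checkpoint path and the epoch-valued step feed are illustrative assumptions:

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # shuffle, then iterate over mini-batches by index
        rnd_idx = np.random.permutation(len(X_train))
        for batch_idx in np.array_split(rnd_idx, len(X_train) // batch_size):
            X_batch, y_batch = X_train[batch_idx], y_train[batch_idx]
            sess.run(training_op,
                     feed_dict={X: X_batch, y: y_batch, step: epoch})
        loss_val, acc_val = sess.run([loss, accuracy],
                                     feed_dict={X: X_valid, y: y_valid})
        print('Epoch %d, val loss: %.4f, val acc: %.4f' %
              (epoch, loss_val, acc_val))
        if loss_val < best_loss:
            best_loss = loss_val
            checks_without_progress = 0
            saver.save(sess, './my_cifarnet_model')  # hypothetical path
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print('Early stopping!')
                break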