import math
import tensorflow as tf
# cifar10_data (used below) comes from this project's dataset helpers;
# its import is not part of this excerpt.

eval_dir = log_dir
batch_size = 128
num_classes = 10
epoch_size = 10000.0
num_iter = int(math.ceil(epoch_size / batch_size))
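# e.g. with epoch_size = 10000 and batch_size = 128: ceil(10000 / 128) = 79 batches per test pass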
load_latest_checkpoint = False

eval_interval_secs = 3
run_once = False

tf.logging.set_verbosity(tf.logging.INFO)
sess = tf.Session()
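
# A minimal sketch (an assumption, not code from this script) of the evaluation
# loop these flags usually drive: restore the newest checkpoint, run one pass
# over the test set, then either stop (run_once) or sleep eval_interval_secs
# and repeat. `accuracy_op` is a hypothetical tensor counting correct
# predictions per batch; it is not defined in this excerpt.
def _eval_loop_sketch(saver, accuracy_op):
    import time
    while True:
        ckpt = tf.train.latest_checkpoint(eval_dir)
        if ckpt is not None:
            saver.restore(sess, ckpt)
            correct = sum(sess.run(accuracy_op) for _ in range(num_iter))
            tf.logging.info('test accuracy: %.4f',
                            correct / float(num_iter * batch_size))
        if run_once:
            break
        time.sleep(eval_interval_secs)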

## Data
with tf.device('/cpu:0'):
    d = cifar10_data(batch_size=batch_size, sess=sess)
    image_batch_tensor, target_batch_tensor = d.build_test_data_tensor(
        shuffle=False, augmentation=False)

## Model
#logits = bn_conv.inference(image_batch_tensor, num_classes=num_classes, is_training=True)
#from tensorflow.contrib.slim.nets import resnet_v2
#with slim.arg_scope(custom_ops.resnet_arg_scope(is_training=True)):
#  net, end_points = resnet_v2.resnet_v2_101(image_batch_tensor,
#                                              num_classes=num_classes,
#                                              global_pool=True)# reduce output to rank 2 (not working)
#logits = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=False)
import nets.resnet_old_reference
hps = nets.resnet_old_reference.HParams(batch_size=batch_size,
                                        num_classes=num_classes,
                                        min_lrn_rate=None,
## ----------------------------------------------------------------------------
## Example #2
ONLY_EVAL = False  # If True, no training is performed
# ---
EPOCHS = 500  # max number of epochs if the network never converges
learning_rate = 0.01
DECREASE_LEARNING_RATE_AFTER_N_BAD_EPOCHS = 4
DECREASE_LEARNING_RATE_N_TIMES = 3
SAVE_AFTER_MIN_N_EPOCHS = -1
LEARNING_RATE_DECAY_FACTOR = 2.0
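
# Hedged sketch (the training loop itself is not part of this excerpt) of how
# the constants above typically interact: after
# DECREASE_LEARNING_RATE_AFTER_N_BAD_EPOCHS epochs without a validation
# improvement, the learning rate is divided by LEARNING_RATE_DECAY_FACTOR, and
# training is treated as converged once that has happened
# DECREASE_LEARNING_RATE_N_TIMES times.
def _next_learning_rate_sketch(lr, bad_epochs, decreases_done):
    """Returns (new_lr, new_decreases_done, stop_training)."""
    if bad_epochs < DECREASE_LEARNING_RATE_AFTER_N_BAD_EPOCHS:
        return lr, decreases_done, False
    if decreases_done >= DECREASE_LEARNING_RATE_N_TIMES:
        return lr, decreases_done, True   # schedule exhausted -> stop training
    return lr / LEARNING_RATE_DECAY_FACTOR, decreases_done + 1, False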

## ----------------------------------------------------------------------------
## DATA INPUT
sess = tf.Session()

# SIMPLY UNCOMMENT THE DATASET YOU WANT TO RUN ON. NOTHING ELSE IS NEEDED.
#data = mnist_data(batch_size=BATCH_SIZE)
data = cifar10_data(batch_size=BATCH_SIZE, sess=sess)
#data = cifar100_data(batch_size=BATCH_SIZE, sess=sess)
#data = imagenet_data(batch_size=64, sess=sess) # requires running the download shell script in utils/imagenet_download/ first
#data = svhn_data(batch_size=BATCH_SIZE, sess=sess)
#data = cars_data(batch_size=BATCH_SIZE, sess=sess)

with tf.device('/cpu:0'):
    train_image_batch, train_label_batch = data.build_train_data_tensor(
        shuffle=True)
    test_image_batch, test_label_batch = data.build_test_data_tensor(
        shuffle=False)
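# Note (an assumption about this input pipeline, not stated in the excerpt):
# if these batch tensors are fed by TF 1.x input queues, the queue runners
# must be started before any sess.run() on them, e.g.:
#   coord = tf.train.Coordinator()
#   threads = tf.train.start_queue_runners(sess=sess, coord=coord)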

NUMBER_OF_CLASSES = data.NUMBER_OF_CLASSES
IMG_HEIGHT = data.IMAGE_HEIGHT
IMG_WIDTH = data.IMAGE_WIDTH
NUM_CHANNELS = data.NUM_OF_CHANNELS
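
# These constants are typically all a model constructor needs; a hedged example
# with a hypothetical `build_model` helper (not defined anywhere in this excerpt):
#   logits = build_model(train_image_batch, num_classes=NUMBER_OF_CLASSES)
tf.logging.info('dataset: %dx%dx%d images, %d classes',
                IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS, NUMBER_OF_CLASSES)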