Example #1
# (snippet starts mid-call: the flag name 'use_avg' below is a hypothetical
#  reconstruction; only the help string survives in the original)
tf.app.flags.DEFINE_boolean('use_avg', False,
                            '''if True, use avg to evaluate''')
tf.app.flags.DEFINE_boolean(
    'is_tune', False,
    '''if True, split train dataset (50K) into 45K, 5K as train/validation data'''
)
tf.app.flags.DEFINE_boolean('is_crop_flip', False,
                            '''if True, make train_data random_crop_flip''')
tf.app.flags.DEFINE_boolean('use_L2', False,
                            '''whether to use L2 regularizer''')

tf.set_random_seed(FLAGS.random_seed)

# Import CIFAR data
if FLAGS.dataset != 2 and not FLAGS.is_stl10:
    (train_data,
     train_labels), (test_data, test_labels) = cifar_data.load_data(
         FLAGS.dataset, FLAGS.is_tune, FLAGS.is_crop_flip)
    split_index = FLAGS.split_index
    # binarize the labels: classes <= split_index become -1, the rest +1
    train_labels[train_labels <= split_index] = -1
    test_labels[test_labels <= split_index] = -1
    train_labels[train_labels >= split_index + 1] = 1
    test_labels[test_labels >= split_index + 1] = 1
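    # e.g. with split_index = 4 on CIFAR-10, classes {0..4} map to -1 and
    # classes {5..9} map to +1, turning the 10-way task into a binary one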

    train_ids = list(range(train_data.shape[0]))
    np.random.seed(123)
    np.random.shuffle(train_ids)
    train_data = train_data[train_ids]
    train_labels = train_labels[train_ids]

    # delete some samples
    num_neg = np.where(train_labels == -1)[0].shape[0]
    idx_neg_tmp = np.where(train_labels == -1)[0][:int(num_neg *
Example #2
        offset = step * batch_size
        vali_data_batch = images[offset:offset + batch_size]
        vali_label_batch = labels[offset:offset + batch_size]
        loss, acc = sess.run([loss_op, accuracy],
                             feed_dict={
                                 X: vali_data_batch,
                                 Y: vali_label_batch,
                                 phase_train: False
                             })
        accuracy_mean.append(acc)
        loss_mean.append(loss)
    return np.mean(loss_mean), np.mean(accuracy_mean)
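# The indented lines above are the body of a truncated batched-evaluation
# helper. A hedged reconstruction of the whole helper, assuming the name
# `evaluate` and its arguments (neither appears in the original snippet):
def evaluate(images, labels, num_batches):
    loss_mean, accuracy_mean = [], []
    for step in range(num_batches):
        offset = step * batch_size
        loss, acc = sess.run([loss_op, accuracy],
                             feed_dict={X: images[offset:offset + batch_size],
                                        Y: labels[offset:offset + batch_size],
                                        phase_train: False})
        loss_mean.append(loss)
        accuracy_mean.append(acc)
    return np.mean(loss_mean), np.mean(accuracy_mean)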


# Import CIFAR data
(train_data, train_labels), (test_data, test_labels) = cifar_data.load_data(
    FLAGS.dataset, FLAGS.is_tune)

# Training Parameters
initial_learning_rate = FLAGS.lr
num_iters = FLAGS.num_iters
batch_size = FLAGS.train_batch_size
inference = resnet_inference if FLAGS.model == 'resnet' else convnet_inference

# Network Parameters
configs = '_T_%d_B_%d_lr_%.4f_L2_%s_is_tune_%s_%s-%d[v%d-%s]-C%d_seed_%d' % (
    FLAGS.num_iters, batch_size, initial_learning_rate, FLAGS.use_L2,
    str(FLAGS.is_tune), FLAGS.model, FLAGS.resnet_layers, FLAGS.version,
    FLAGS.activation, FLAGS.dataset, FLAGS.random_seed)

# create tf Graph input
X = tf.placeholder(tf.float32, [batch_size, 32, 32, 3])
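The snippet cuts off after the image placeholder. The feed_dict in the evaluation loop above implies two companion placeholders; a sketch, with the label shape and dtype as assumptions:

Y = tf.placeholder(tf.float32, [batch_size])  # +/-1 binary labels
phase_train = tf.placeholder(tf.bool)         # batch-norm train/eval switch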
Example #3
]

# Create a saver.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5000)
grads = tf.gradients(loss_op, W)

# cache the raw data on disk so we don't have to reload it in every session
cifar_data_dir = './cifar%d_data/raw_data_C%d.npy' % (FLAGS.dataset,
                                                      FLAGS.dataset)
cifar_label_dir = './cifar%d_data/raw_label_C%d.npy' % (FLAGS.dataset,
                                                        FLAGS.dataset)
if os.path.isfile(cifar_data_dir) and os.path.isfile(cifar_label_dir):
    raw_data = np.load(cifar_data_dir)
    raw_label = np.load(cifar_label_dir)
else:
    (raw_data, raw_label), (test_data, test_labels) = cifar_data.load_data(
        FLAGS.dataset, FLAGS.is_tune)
    np.save(cifar_data_dir, raw_data)
    np.save(cifar_label_dir, raw_label)

print('load dataset: [CIFAR%d]' % FLAGS.dataset)
num_batches = raw_data.shape[0] // batch_size

init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False))
sess.run(init)

model_iter = '-%s.' % str(
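The snippet ends while formatting a checkpoint suffix. Given the `grads = tf.gradients(loss_op, W)` op built above, a hedged sketch of the per-batch gradient evaluation this setup supports (the placeholder names are carried over from Example #2 and are assumptions here):

for step in range(num_batches):
    offset = step * batch_size
    grad_vals = sess.run(grads,
                         feed_dict={X: raw_data[offset:offset + batch_size],
                                    Y: raw_label[offset:offset + batch_size],
                                    phase_train: False})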