Example no. 1
def train_and_validate(init_learning_rate_log, weight_decay_log):
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    hp_d['init_learning_rate'] = 10**init_learning_rate_log
    hp_d['weight_decay'] = 10**weight_decay_log

    model = ConvNet([227, 227, 3], 2, **hp_d)
    evaluator = Evaluator()
    optimizer = Optimizer(model, train_set, evaluator, val_set=val_set, **hp_d)

    sess = tf.Session(graph=graph, config=config)
    train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)

    # Return the maximum validation score as target
    best_val_score = np.max(train_results['eval_scores'])

    return best_val_score
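The log-scale arguments and maximized return value suggest this objective is meant for a Bayesian hyperparameter search. A minimal sketch of driving it, assuming the bayes_opt package and illustrative search bounds (neither is in the source):

from bayes_opt import BayesianOptimization

bayes_optimizer = BayesianOptimization(
    f=train_and_validate,
    pbounds={'init_learning_rate_log': (-5, -1),  # illustrative bounds
             'weight_decay_log': (-6, -2)},
)
bayes_optimizer.maximize(init_points=2, n_iter=10)
print(bayes_optimizer.max)  # best score and the hyperparameters that achieved it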
Example no. 2

print((test_set.labels[:, 1] == 0).sum(), (test_set.labels[:, 1] == 1).sum())
""" 2. Set test hyperparameters """
hp_d = dict()
image_mean = np.load('/tmp/asirra_mean.npy')  # load mean image
hp_d['image_mean'] = image_mean

# FIXME: Test hyperparameters
hp_d['batch_size'] = 256
hp_d['augment_pred'] = True
""" 3. Build graph, load weights, initialize a session and start test """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True

model = ConvNet([227, 227, 3], 2, **hp_d)
evaluator = Evaluator()
saver = tf.train.Saver()

sess = tf.Session(graph=graph, config=config)
saver.restore(sess, '/tmp/model.ckpt')  # restore learned weights
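# Sample image URLs for an ad-hoc prediction check (kept commented out below):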
'''
img_url = [
    'http://assets.shockpedia.com/app/uploads/2017/10/29091359/puppy-1.jpg',
    'https://vetstreet.brightspotcdn.com/dims4/default/5b3ffe7/2147483647/thumbnail/180x180/quality/90/?url=https%3A%2F%2Fvetstreet-brightspot.s3.amazonaws.com%2F8e%2F4e3910c36111e0bfca0050568d6ceb%2Ffile%2Fhub-dogs-puppy.jpg',
    'https://www.petfinder.com/images/breeds/dog/1460.jpg',
    'https://d4n5pyzr6ibrc.cloudfront.net/media/27FB7F0C-9885-42A6-9E0C19C35242B5AC/4785B1C2-8734-405D-96DC23A6A32F256B/thul-90efb785-97af-5e51-94cf-503fc81b6940.jpg?response-content-disposition=inline',
    'http://www.bristol.ac.uk/media-library/sites/vetscience/migrated/images/catstudymonte.jpg',
    'https://ichef.bbci.co.uk/images/ic/480xn/p04gr933.jpg'
]
'''
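The URLs above could be exercised with a small download-and-preprocess helper. A sketch, assuming PIL is available; load_image is a hypothetical helper, not part of the source:

import urllib.request
from io import BytesIO
from PIL import Image

def load_image(url, size=(227, 227)):
    # Download an image, resize it, and return a (1, H, W, 3) float32 array.
    with urllib.request.urlopen(url) as resp:
        img = Image.open(BytesIO(resp.read())).convert('RGB').resize(size)
    return np.asarray(img, dtype=np.float32)[None, ...]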
Example no. 3
# Load test set
X_test, y_test = dataset.read_data(test_dir, IM_SIZE)
test_set = dataset.DataSet(X_test, y_test)
""" 2. Set test hyperparameters """
hp_d = dict()

# FIXME: Test hyperparameters
hp_d['batch_size'] = 8
""" 3. Build graph, load weights, initialize a session and start test """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

model = ConvNet([IM_SIZE[0], IM_SIZE[1], 3], NUM_CLASSES, **hp_d)
evaluator = Evaluator()
saver = tf.train.Saver()

sess = tf.Session(graph=graph, config=config)
saver.restore(sess, './model.ckpt')  # restore learned weights
test_y_pred = model.predict(sess, test_set, **hp_d)
test_score = evaluator.score(test_set.labels, test_y_pred)

print('Test accuracy: {}'.format(test_score))
""" 4. Draw masks on image """
draw_dir = os.path.join(test_dir, 'draws')  # FIXME
if not os.path.isdir(draw_dir):
    os.mkdir(draw_dir)
im_dir = os.path.join(test_dir, 'images')  # FIXME
im_paths = []
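The snippet breaks off here; a plausible continuation (an assumption, including the file extension) gathers the test image paths from im_dir:

import glob
im_paths.extend(sorted(glob.glob(os.path.join(im_dir, '*.png'))))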
Example no. 4
""" 2. Set training hyperparameters """
hp_d = dict()

# FIXME: Training hyperparameters
hp_d['batch_size'] = 128
hp_d['num_epochs'] = 1800

hp_d['augment_train'] = True

hp_d['init_learning_rate'] = 0.2
hp_d['momentum'] = 0.9

# FIXME: Regularization hyperparameters
hp_d['weight_decay'] = 0.0001
hp_d['dropout_prob'] = 0.0

# FIXME: Evaluation hyperparameters
hp_d['score_threshold'] = 1e-4
""" 3. Build graph, initialize a session and start training """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

model = ConvNet([32, 32, 3], 10, **hp_d)
evaluator = Evaluator()
optimizer = Optimizer(model, train_set, evaluator, val_set=val_set, **hp_d)

sess = tf.Session(graph=graph, config=config)
train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)
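Example no. 1 shows that train_results carries per-epoch 'eval_scores', so the best validation score can be read back the same way:

best_val_score = np.max(train_results['eval_scores'])
print('Best validation score: {}'.format(best_val_score))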
Example no. 5

# image_mean = np.load('/tmp/data_mean.npy')
anchors = dataset.load_json(os.path.join(test_dir, 'anchors.json'))
class_map = dataset.load_json(os.path.join(test_dir, 'classes.json'))
nms_flag = True
hp_d = dict()
# hp_d['image_mean'] = image_mean
hp_d['batch_size'] = 16
hp_d['nms_flag'] = nms_flag
""" 3. Build graph, load weights, initialize a session and start test """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

model = ConvNet([IM_SIZE[0], IM_SIZE[1], 3],
                NUM_CLASS,
                anchors,
                grid_size=(IM_SIZE[0] // 32, IM_SIZE[1] // 32))
saver = tf.train.Saver()

sess = tf.Session(graph=graph, config=config)
saver.restore(sess, '/tmp/model.ckpt')
test_y_pred = model.predict(sess, test_set, **hp_d)
""" 4. Draw boxes on image """
draw_dir = os.path.join(test_dir, 'draws')
for idx, (img, y_pred) in enumerate(zip(test_set.images, test_y_pred)):
    draw_path = os.path.join(draw_dir, '{}_test_images.png'.format(idx + 1))
    if nms_flag:
        bboxes = predict_nms_boxes(y_pred)
    else:
        bboxes = convert_boxes(y_pred)
    bboxes = bboxes[np.nonzero(np.any(bboxes > 0, axis=1))]
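    # NOTE (assumption): the original snippet breaks off before the actual
    # drawing; the sketch below assumes bboxes rows start with
    # (x_min, y_min, x_max, y_max) pixel coordinates, img is a uint8 HxWx3
    # array, and OpenCV is imported as cv2.
    canvas = img.copy()
    for x_min, y_min, x_max, y_max in bboxes[:, :4].astype(int):
        cv2.rectangle(canvas, (int(x_min), int(y_min)), (int(x_max), int(y_max)),
                      (0, 255, 0), 2)
    cv2.imwrite(draw_path, canvas)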
Example no. 6
hp_d['augment_train'] = True
hp_d['augment_pred'] = True

hp_d['init_learning_rate'] = 0.01
hp_d['momentum'] = 0.9
hp_d['learning_rate_patience'] = 30
hp_d['learning_rate_decay'] = 0.1
hp_d['eps'] = 1e-8

# FIXME: Regularization hyperparameters
hp_d['weight_decay'] = 0.0005
hp_d['dropout_prob'] = 0.5

# FIXME: Evaluation hyperparameters
hp_d['score_threshold'] = 1e-4


""" 3. Build graph, initialize a session and start training """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True

model = ConvNet([227, 227, 3], 13, **hp_d) # num_classes = 13
evaluator = Evaluator()
optimizer = Optimizer(model, train_set, evaluator, val_set=val_set, **hp_d)

sess = tf.Session(graph=graph, config=config)
train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)
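Other examples on this page restore weights from '/tmp/model.ckpt'; saving the trained weights the same way would look like this (a sketch, not in the source):

saver = tf.train.Saver()
saver.save(sess, '/tmp/model.ckpt')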
Example no. 7
# FIXME: Regularization hyperparameters
hp_d['weight_decay'] = 0.0005
hp_d['dropout_prob'] = 0.8

# FIXME: Evaluation hyperparameters
hp_d['score_threshold'] = 1e-4

old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
""" 3. Build graph, initialize a session and start training """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

model = ConvNet([28, 28, 1], 10, **hp_d)

evaluator = Evaluator()

train_set = dataset.DataSet(train_images, train_labels)
test_set = dataset.DataSet(test_images, test_labels)

# train_set = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(buffer_size=1000000).batch(hp_d['batch_size'])
# test_set = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(hp_d['batch_size'])

optimizer = Optimizer(model, train_set, evaluator, test_set=test_set, **hp_d)

sess = tf.Session(graph=graph, config=config)
train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)
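Since old_v was saved before lowering the logging level, the original verbosity can be restored once training finishes:

tf.logging.set_verbosity(old_v)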
Example no. 8
# SANITY CHECK
print('trainval_list', trainval_list[0:5])

train_set = dataset.LazyDataset(root_dir, trainval_list, anno_dict, rescale=False)
test_set = dataset.LazyDataset(root_dir, test_list, anno_dict, rescale=False)

Ch, H, W = test_set[0][0].shape

# EMNIST NETWORK
hp_d = dict() # hyperparameters for a network
mean = np.load(mean_dir)
hp_d['image_mean'] = np.transpose(np.tile(mean,(H,W,1)),(2,0,1))
hp_d['is_train'] = False

net = ConvNet((Ch,H,W), len(anno_dict['classes']), **hp_d)
net.logits.restore(net_dir)

# EMNIST NETWORK: SANITY CHECK
start_time = time.time()
ys, y_preds, test_score, confusion_matrix = net.predict(test_set, **hp_d)
total_time = time.time() - start_time

print('Test error rate: {}'.format(test_score))
print('Total tact time (sec): {}'.format(total_time))
print('Tact time per image (sec): {}'.format(total_time / len(test_list)))
print('Confusion matrix: \n{}'.format(confusion_matrix))

# SHOW TOP-K SAMPLES
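idx_tests is not defined in the snippet; a plausible stand-in (an assumption about the shapes of ys and y_preds) picks the K most confident mistakes:

K = 10  # number of samples to show
pred_classes = np.argmax(y_preds, axis=1)
true_classes = np.argmax(ys, axis=1)
is_wrong = pred_classes != true_classes
idx_tests = np.argsort(np.max(y_preds, axis=1) * is_wrong)[::-1][:K]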

for idx_test in idx_tests:
    pass  # loop body truncated in the original snippet

Example no. 9

# The line opening the annotation file is missing in the original snippet;
# `anno_path` below is an assumed placeholder for the annotation JSON path.
with open(anno_path, 'r') as fid:
    anno_dict = json.load(fid)
# with open('/Data/emnist/balanced/original/annotation/annotation1_wp_0.3.json', 'r') as fid:
#     noisy_anno_dict = json.load(fid)
train_set = dataset.LazyDataset(root_dir, trainval_list, anno_dict)
test_set = dataset.LazyDataset(root_dir, test_list, anno_dict)
    
# emnist dataset: SANITY CHECK
print(len(test_set), type(test_set))
print(len(test_list))

# emnist network
from models.nn import VGG as ConvNet

hp_d = dict() # hyperparameters for a network
net = ConvNet(train_set[0][0].shape, len(anno_dict['classes']), **hp_d)
net.logits.restore('/Data/checkpts/noisy/model_fold_1_trainval_ratio_0.0.dnn')

# emnist network: SANITY CHECK
start_time = time.time()
ys, y_preds, test_score, confusion_matrix = net.predict(test_set, **hp_d)
total_time = time.time() - start_time

print('Test error rate: {}'.format(test_score))
print('Total tact time (sec): {}'.format(total_time))
print('Tact time per image (sec): {}'.format(total_time / len(test_list)))
print('Confusion matrix: \n{}'.format(confusion_matrix))

# t-SNE check
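No t-SNE code survives in the snippet. A minimal sketch with scikit-learn, assuming feats holds per-sample feature vectors extracted from the network:

from sklearn.manifold import TSNE

feature_embedding = TSNE(n_components=2, random_state=0).fit_transform(feats)
print(feature_embedding.shape)  # (num_test_samples, 2)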

for fn in ['if_cg_logreg', 'if_se_logreg', 'if_v_cos', 'if_v']: