Example #1
0
def train_and_validate(init_learning_rate_log, weight_decay_log):
    """Train the model with the given log-scale hyperparameters and return
    the best validation score (objective function for hyperparameter search).

    :param init_learning_rate_log: float, log10 of the initial learning rate.
    :param weight_decay_log: float, log10 of the weight decay coefficient.
    :return: float, maximum validation score observed during training.
    """
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand

    # NOTE: mutates the module-level hp_d dict in place; the exponentiation
    # converts the log10-scale search parameters back to linear scale.
    hp_d['init_learning_rate'] = 10**init_learning_rate_log
    hp_d['weight_decay'] = 10**weight_decay_log

    model = ConvNet([227, 227, 3], 2, **hp_d)
    evaluator = Evaluator()
    optimizer = Optimizer(model, train_set, evaluator, val_set=val_set, **hp_d)

    # FIX: use a context manager so the session (and the GPU memory it holds)
    # is released after every call. The original never closed the session,
    # leaking one tf.Session per evaluation — which accumulates when this
    # objective is called repeatedly by a hyperparameter optimizer.
    with tf.Session(graph=graph, config=config) as sess:
        train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)

    # Objective value: the maximum validation score over all epochs.
    best_val_score = np.max(train_results['eval_scores'])

    return best_val_score
Example #2
0
""" 2. Set training hyperparameters """
# FIXME: tune these hyperparameters for the task at hand.
hp_d = {
    # Training
    'batch_size': 128,
    'num_epochs': 1800,
    'augment_train': True,
    'init_learning_rate': 0.2,
    'momentum': 0.9,
    # Regularization
    'weight_decay': 0.0001,
    'dropout_prob': 0.0,
    # Evaluation
    'score_threshold': 1e-4,
}

""" 3. Build graph, initialize a session and start training """
# Work on the default graph and let TensorFlow grow GPU memory as needed.
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# 32x32 RGB inputs, 10 output classes (CIFAR-10-sized problem).
model = ConvNet([32, 32, 3], 10, **hp_d)
evaluator = Evaluator()
optimizer = Optimizer(model, train_set, evaluator, val_set=val_set, **hp_d)

sess = tf.Session(graph=graph, config=config)
train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)
Example #3
0
# FIXME: Regularization hyperparameters
hp_d['weight_decay'] = 0.0005
hp_d['dropout_prob'] = 0.8

# FIXME: Evaluation hyperparameters
hp_d['score_threshold'] = 1e-4

# Silence TF INFO/WARNING output during training; the previous verbosity is
# kept in old_v so it can be restored later.
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

""" 3. Build graph, initialize a session and start training """
# Use the default graph and grow GPU memory on demand.
graph = tf.get_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# 28x28 single-channel inputs, 10 output classes (MNIST-sized problem).
model = ConvNet([28, 28, 1], 10, **hp_d)

evaluator = Evaluator()

# Wrap the raw image/label arrays in the project's DataSet helper.
train_set = dataset.DataSet(train_images, train_labels)
test_set = dataset.DataSet(test_images, test_labels)

optimizer = Optimizer(model, train_set, evaluator, test_set=test_set, **hp_d)

sess = tf.Session(graph=graph, config=config)
train_results = optimizer.train(sess, details=True, verbose=True, **hp_d)