def solver_generator(params):
  """Build a hyperengine TensorflowSolver for one hyper-parameter sample.

  `cnn_model(params)` is called for its side effect only (presumably it
  constructs the TF graph for this sample — return value is discarded);
  the solver reads the module-level `data` object.
  """
  # Fixed training configuration shared by every sample in this example.
  run_config = dict(
    batch_size=1000,
    eval_batch_size=2500,
    epochs=10,
    evaluate_test=True,
    eval_flexible=False,
  )
  cnn_model(params)  # graph construction; result intentionally unused
  return hype.TensorflowSolver(data=data, hyper_params=params, **run_config)
# Esempio n. 2 (Example 2) — snippet separator from the scraped source page;
# the stray "0" was a vote count. Comment-ized so the file parses as Python.
 def solver_generator(self, params):
   """Create a TensorflowSolver for a Tiramisu model built from *params*.

   `Tiramisu(config=params)` is invoked for its side effect only (presumably
   graph construction — return value is discarded). Data and the hyper-param
   spec come from the instance, batch/epoch settings from `self.config`.
   """
   run_config = dict(
     batch_size=self.config.batch_size,
     eval_batch_size=self.config.batch_size,  # same batch size for eval
     epochs=self.config.epochs,
     evaluate_test=True,
     eval_flexible=False,
     save_dir=os.path.join(self.config.models,'optimizer-{date}-{random_id}'),
     # 'save_accuracy_limit': 0.9930,
   )
   Tiramisu(config=params)  # model construction; result intentionally unused
   return hyper.TensorflowSolver(data=self.hyper_data,
                                 hyper_params=self.hyper_params_spec,
                                 **run_config)
def solver_generator(params):
    """Build a TensorflowSolver for the SMS-spam RNN model sampled by *params*.

    Unlike the CNN examples, `rnn_model` returns the data set used for
    training; checkpoints above the accuracy limit are saved under save_dir.
    """
    run_config = dict(
        batch_size=200,
        eval_batch_size=200,
        epochs=20,
        evaluate_test=True,
        eval_flexible=False,
        save_dir='temp-sms-spam/model-zoo/example-3-2-{date}-{random_id}',
        save_accuracy_limit=0.97,  # only persist models above this accuracy
    )
    dataset = rnn_model(params)
    return hype.TensorflowSolver(data=dataset, hyper_params=params, **run_config)
# Esempio n. 4 (Example 4) — snippet separator from the scraped source page;
# the stray "0" was a vote count. Comment-ized so the file parses as Python.
def solver_generator(params):
    """Build a TensorflowSolver whose training is gated by a curve predictor.

    stop_condition/result_metric come from the module-level `curve_predictor`
    (early termination of unpromising runs). `cnn_model(params)` is called for
    its side effect only; the solver reads the module-level `data` object.
    """
    run_config = dict(
        batch_size=1000,
        eval_batch_size=2500,
        epochs=10,
        stop_condition=curve_predictor.stop_condition(),
        result_metric=curve_predictor.result_metric(),
        evaluate_test=True,
        eval_flexible=False,
        save_dir='temp-mnist/model-zoo/example-1-5-{date}-{random_id}',
        save_accuracy_limit=0.9930,  # only persist models above this accuracy
    )
    cnn_model(params)  # graph construction; result intentionally unused
    return hype.TensorflowSolver(data=data, hyper_params=params, **run_config)
# --- Tail of a TF1 graph-building script: dense head, loss, optimizer,
# --- accuracy metric, MNIST data loading, and solver training.
# NOTE(review): `pool2` and `y` are defined earlier in the file, outside this
# chunk — presumably the conv/pool stack and the one-hot label placeholder.

# Flatten the last pooling output to (batch, features) for the dense layers.
flat = tf.reshape(pool2,
                  [-1, pool2.shape[1] * pool2.shape[2] * pool2.shape[3]])
dense = tf.layers.dense(inputs=flat, units=1024, activation=tf.nn.relu)
# 10 output units — one logit per MNIST digit class.
logits = tf.layers.dense(inputs=dense, units=10)

# Mean cross-entropy over the batch; named so the solver can look it up
# in the graph by name ('loss', 'minimize', 'accuracy' below).
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                 labels=y),
                         name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op, name='minimize')
# Fraction of batch items whose argmax prediction matches the label.
accuracy = tf.reduce_mean(tf.cast(
    tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)), tf.float32),
                          name='accuracy')

# Load MNIST (downloads to temp-mnist/data on first run) with one-hot labels.
tf_data_sets = input_data.read_data_sets('temp-mnist/data', one_hot=True)
# Reshape flat 784-pixel images to NHWC (batch, 28, 28, 1) for the conv net.
convert = lambda data_set: hype.DataSet(
    data_set.images.reshape((-1, 28, 28, 1)), data_set.labels)
data = hype.Data(train=convert(tf_data_sets.train),
                 validation=convert(tf_data_sets.validation),
                 test=convert(tf_data_sets.test))

# Fixed training configuration; no hyper-parameter search in this snippet.
solver_params = {
    'batch_size': 1000,
    'eval_batch_size': 2500,
    'epochs': 10,
    'evaluate_test': True,
    'eval_flexible': False,
}
solver = hype.TensorflowSolver(data=data, **solver_params)
solver.train()