'eval_batch_size': 200, 'epochs': 20, 'evaluate_test': True, 'eval_flexible': False, 'save_dir': 'temp-sms-spam/model-zoo/example-3-2-{date}-{random_id}', 'save_accuracy_limit': 0.97, } data = rnn_model(params) solver = hype.TensorflowSolver(data=data, hyper_params=params, **solver_params) return solver hyper_params_spec = hype.spec.new( max_sequence_length=hype.spec.choice(range(20, 50)), min_frequency=hype.spec.choice([1, 3, 5, 10]), embedding_size=hype.spec.choice([32, 64, 128]), rnn_cell=hype.spec.choice(['basic_rnn', 'lstm', 'gru']), rnn_hidden_size=hype.spec.choice([16, 32, 64]), dropout_keep_prob=hype.spec.uniform(0.5, 1.0), learning_rate=10**hype.spec.uniform(-4, -3), ) strategy_params = { 'io_load_dir': 'temp-sms-spam/example-3-2', 'io_save_dir': 'temp-sms-spam/example-3-2', } tuner = hype.HyperTuner(hyper_params_spec, solver_generator, **strategy_params) tuner.tune()
# NOTE(review): the next five statements are the tail of a `dnn_model(params)`
# whose `def` line lies outside this chunk — restore the enclosing function's
# indentation when merging.
layer = tf.add(tf.matmul(tf.sigmoid(layer), weights[2]), biases[2])
output = tf.add(tf.matmul(tf.sigmoid(layer), weights['output']), biases['output'])
cost = tf.reduce_mean(tf.squared_difference(output, y), name='loss')
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=params.learning_rate).minimize(cost, name='minimize')
# Named tensor only: the solver presumably looks the 'accuracy' op up by name
# in the graph, so the return value is deliberately discarded — TODO confirm.
tf.reduce_mean(tf.cast(tf.abs(output - y) < 0.5, tf.float32), name='accuracy')


def solver_generator(params):
  """Build a TensorflowSolver for one sampled set of hyper-parameters.

  Args:
    params: a hyper-parameter sample produced from `hyper_params_spec`.

  Returns:
    A `hype.TensorflowSolver` wired to the freshly-built model's data.
  """
  solver_params = {
    'batch_size': 167,
    'epochs': 50,
    'evaluate_test': True,
    'eval_flexible': True,
  }
  # BUG FIX: the original discarded the result of dnn_model(params), leaving
  # `data` unbound and raising NameError on the next line. Bind it, matching
  # the parallel example that does `data = rnn_model(params)`.
  data = dnn_model(params)
  solver = hype.TensorflowSolver(data=data, hyper_params=params, **solver_params)
  return solver


hyper_params_spec = hype.spec.new(
  # Sample the learning rate log-uniformly in [1e-3, 1e-1].
  learning_rate=10 ** hype.spec.uniform(-1, -3),
)

tuner = hype.HyperTuner(hyper_params_spec, solver_generator)
tuner.tune()