Example #1
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(
        data_set, classification_network, measure)
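    # randomized hill climbing searches the network's weight space directly; no gradients are used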
    oa = RandomizedHillClimbing(nnop)
    base.train(oa, classification_network, 'RHC', training_ints, validation_ints, testing_ints, measure,
               training_iterations, OUTFILE.format(data_name, 'RHC'))
    return
Example #2
def main(layers, training_iterations, test_data_file, train_data_file,
         validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    classification_network = factory.createClassificationNetwork(layers, relu)
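    # full-batch backpropagation using the RPROP update rule defined above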
    base.train(
        BatchBackPropagationTrainer(data_set, classification_network, measure,
                                    rule), classification_network, 'Backprop',
        training_ints, validation_ints, testing_ints, measure,
        training_iterations, OUTFILE.format(data_name, 'Backprop'))
    return
Example #3
def main(_):
    # Init
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    np.random.seed(1)
    random.seed(a=1, version=2)

    # Load configuration
    config = get_parameters(reproduce=None, gpu=-1)
    print(config)

    # Load Dataset
    data = KBDataset(config.data)
    print(data)

    # tensorflow config
    #tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
    #    visible_device_list=str(config.gpu)))
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    

    # Model Loading
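    # TransE and its torus-embedding variant TorusE are knowledge-graph embedding
    # models; data.nent and data.nrel are the entity and relation counts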
    if config.model == "transe":
        model = TransE(config, data.nent, data.nrel)
    else:
        model = TorusE(config, data.nent, data.nrel)
    optimizer = tf.train.GradientDescentOptimizer(config.lr)
    # global_step = tf.Variable(0, name="gb", trainable=False)
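    # the explicit compute_gradients/apply_gradients pair is equivalent to optimizer.minimize(model.loss)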
    cal_gradient = optimizer.compute_gradients(model.loss)
    train_opt = optimizer.apply_gradients(cal_gradient)

    # Config Saver and Session
    saver = tf.train.Saver(max_to_keep=100)
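    # keep up to 100 checkpoints (tf.train.Saver's default is 5)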
    sess.run(tf.global_variables_initializer())

    # Training
    base.train(data, model, train_opt, config, sess, saver)

    # Testing
    base.test(data, model, sess)
Example #4
def main(P, mate, mutate, layers, training_iterations, test_data_file,
         train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    oa_name = "GA_{}_{}_{}".format(P, mate, mutate)
    classification_network = factory.createClassificationNetwork(layers, relu)
    nnop = NeuralNetworkOptimizationProblem(data_set, classification_network,
                                            measure)
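    # P = population size; mate and mutate set how many individuals are
    # crossed over and mutated per generation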
    oa = StandardGeneticAlgorithm(P, mate, mutate, nnop)
    base.train(oa, classification_network, oa_name, training_ints,
               validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, oa_name))
    return
Example #5
def main(layers, training_iterations, test_data_file, train_data_file, validate_data_file, data_name):
    """Run this experiment"""
    training_ints = base.initialize_instances(train_data_file)
    testing_ints = base.initialize_instances(test_data_file)
    validation_ints = base.initialize_instances(validate_data_file)
    factory = BackPropagationNetworkFactory()
    measure = SumOfSquaresError()
    data_set = DataSet(training_ints)
    relu = RELU()
    # 50 and 0.000001 are the defaults from RPROPUpdateRule.java
    rule = RPROPUpdateRule(0.064, 50, 0.000001)
    classification_network = factory.createClassificationNetwork(layers, relu)
    with open(OUTFILE.format(data_name, 'Backprop'), 'a+') as f:
        f.seek(0)  # 'a+' opens positioned at end of file; rewind before reading
        content = f.read()
        if "MSE_trg" not in content:
            # write the CSV header row only once
            f.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(
                'iteration', 'MSE_trg', 'MSE_val', 'MSE_tst', 'acc_trg',
                'acc_val', 'acc_tst', 'f1_trg', 'f1_val', 'f1_tst', 'elapsed'))
    base.train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network,
               'Backprop', training_ints, validation_ints, testing_ints, measure, training_iterations,
               OUTFILE.format(data_name, 'Backprop'))
    return
Example #6
def simple_case():
    features = [1.0, 3.0, 2.0]
    weights = [0.0, 0.0, 0.0]
    goal_pred = 10.0
    alpha = 1e-2
    times = 100
    print(f"goal_prediction={goal_pred}")
    label = predict(features, weights)
    print("*Before training*")
    print(f"prediction={label}")
    print("*After training*")
    weights = train(features, weights, goal_pred, times, alpha)
    label = predict(features, weights)
    print(f"prediction={label}")
    print(f"confidence={round(100 - abs(label - goal_pred) * 100 / (label + goal_pred), 2)}%")
    print(f"weights={weights}")
Example #7
    def train(self, goal_pred, inputs, times=500, alpha=0.01):
        # delegate to the module-level train() and store the updated weights
        self.weights = train(inputs, self.weights, goal_pred, times, alpha)
Example #8
import argparse

from base import Config, train

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
    parser.add_argument('--ld', default=0.3, type=float, help='learning-rate decay')
    parser.add_argument('--wd', default=0.001, type=float, help='weight decay')
    parser.add_argument('--dr', default=1, type=int, help='dropout on (1) or off (0)')

    parser.add_argument('--ar', default='peng', type=str, help='architecture name')

    parser.add_argument('--ch', nargs='+', default=(32, 64, 128, 256, 256, 64),
                        type=int, help='channels per layer')
    parser.add_argument('--ta', default='ageC', type=str, help='prediction target')

    args = parser.parse_args()

    cfg = Config(learning_rate=args.lr, lr_decay=args.ld, weight_decay=args.wd, dropout=bool(args.dr), arch=args.ar,
                 channels=args.ch, target=args.ta)
    train(cfg)
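A typical invocation, assuming the script is saved as run.py (hypothetical name):

    python run.py --lr 0.005 --wd 0.0005 --ch 32 64 128 256 256 64 --ta ageC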
Example #9
        # This also makes training faster, less work to do!
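        # stop_gradient keeps gradients from flowing into the pretrained
        # AlexNet layers below fc7, so only the new head is trained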
        fc7 = tf.stop_gradient(fc7)

        # TODO: Add the final layer for traffic sign classification.
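        # 43 classes, matching the German Traffic Sign benchmark (GTSRB)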
        nb_classes = 43
        shape = (fc7.get_shape().as_list()[-1], nb_classes)  # use this shape for the weight matrix

        with tf.name_scope("last_layer"):
            weights = tf.Variable(tf.random_normal(shape, stddev=0.01),
                                  name="weights")
            bias = tf.Variable(tf.zeros(nb_classes), name="bias")
            logits = tf.add(tf.matmul(fc7, weights), bias)

        return logits

    def _create_loss_op(self):
        one_hot_y = tf.one_hot(self.Y, 43)
        return tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.inference_op,
                                                    labels=one_hot_y))


# TODO: Train and evaluate the feature extraction model.
train(FeatureExtFromAlexNet(),
      X_train,
      y_train,
      X_test,
      y_test,
      ckpt="ckpt/cnn")
Example #10
def train():
    """
        Train CIFAR-10 for a number of steps.
    """
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()

        # Get images and labels for CIFAR-10.
        # Force input pipeline to CPU:0 to avoid operations sometimes ending up on
        # GPU and resulting in a slow down.
        with tf.device('/cpu:0'):
            images, labels = base.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = base.inference(images)

        # Calculate loss.
        loss = base.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = base.train(loss, global_step)

        class _LoggerHook(tf.train.SessionRunHook):
            """
                Logs loss and runtime.
            """
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)  # Asks for loss value.

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook()
                ],
                config=tf.ConfigProto(
                    log_device_placement=FLAGS.log_device_placement)) as mon_sess:
            while not mon_sess.should_stop():
                mon_sess.run(train_op)