Example #1

import os

import tensorflow as tf

# InputLayer, BatchNormLayer, Layer, CategoricalTrainer and TrainPolicy come
# from this project's modules; `data` (an MNIST data set collection),
# batch_size, hidden_layers, learn_rate_decay, checkpoint_path and SAVE are
# assumed to have been defined earlier.

with tf.Session() as sess:
    inputs = tf.placeholder(tf.float32, shape=(None, 784))

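    # Hyperparameters: noise_std is the std of the noise added to activations,
    # beta/gamma configure the BatchNormLayers' shift and scale, and bactivate
    # presumably enables each layer's backwards (reconstruction) pass, which is
    # what unsupervised_cost applies to.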
    bactivate = True
    noise_std = 0.3
    beta = 0.5
    gamma = 0.5
    non_lin = tf.nn.sigmoid
    input_layer = InputLayer(inputs)
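    # Stack alternating BatchNormLayer / Layer pairs. Each hidden Layer starts
    # with a single node (second argument); the TrainPolicy below is configured
    # to grow the network during training.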
    bn1 = BatchNormLayer(input_layer, sess, beta=beta, gamma=gamma)
    net1 = Layer(bn1, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn2 = BatchNormLayer(net1, sess, beta=beta, gamma=gamma)
    net2 = Layer(bn2, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn3 = BatchNormLayer(net2, sess, beta=beta, gamma=gamma)
    net3 = Layer(bn3, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn4 = BatchNormLayer(net3, sess, beta=beta, gamma=gamma)
    net4 = Layer(bn4, 1, sess, non_liniarity=non_lin, bactivate=bactivate, unsupervised_cost=.001, noise_std=noise_std)
    bn5 = BatchNormLayer(net4, sess, beta=beta, gamma=gamma)
    outputNet = Layer(bn5, 10, sess, non_liniarity=tf.sigmoid, bactivate=False, supervised_cost=1.)

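    # CategoricalTrainer builds the classification objective (0.15 is
    # presumably the initial learning rate). TrainPolicy then drives training:
    # it decays and boosts the learning rate and grows the network after two
    # turns without improvement, starting from epoch 1.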
    trainer = CategoricalTrainer(outputNet, 0.15)
    trainPolicy = TrainPolicy(trainer, data, batch_size, max_iterations=3000,
                              grow_after_turns_without_improvement=2,
                              start_grow_epoch=1,
                              learn_rate_decay=0.99,
                              learn_rate_boost=0.01,
                              back_loss_on_misclassified_only=True)

    trainPolicy.run_full()

    print(trainer.accuracy(data.test.images, data.test.labels))
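
# The second example restores a previously trained network from checkpoints
# (or trains one to convergence and saves it), then reports accuracy and
# per-layer reconstruction losses. create_network is assumed to be defined
# earlier, building a network like the one above.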
with tf.Session() as sess:
    net, trainer = create_network(sess, hidden_layers)

    # train minimal model on mnist/load checkpoints
    if not os.path.exists(checkpoint_path):
        os.mkdir(checkpoint_path)
    saver = tf.train.Saver()
    checkpoints = tf.train.get_checkpoint_state(checkpoint_path)

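    # Restore the most recent checkpoint if one exists; otherwise train from
    # scratch until convergence and optionally save the trained network.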
    if checkpoints:
        saver.restore(sess, checkpoints.model_checkpoint_path)
        print("Loaded checkpoints %s" % checkpoints.model_checkpoint_path)
    else:
        print("retraining network")
        tp = TrainPolicy(trainer, data, batch_size, learn_rate_decay=learn_rate_decay)
        tp.train_till_convergence()

        if SAVE:
            saver.save(sess, checkpoint_path + "/network")

    # get error on the validation set
    print("validation error ",
          trainer.accuracy(data.validation.images, data.validation.labels))

    # get reconstruction errors
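    # back_losses_per_layer presumably returns one reconstruction (backwards
    # pass) loss per layer for the given inputs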
    print(trainer.back_losses_per_layer(data.train.images))

    # get reconstruction error on misclassified examples only
    print(trainer.back_losses_per_layer(data.train.images,
                                        misclassification_only=True,
                                        labels=data.train.labels))

    results = {}