Code example #1
import logging
import os

def main(argv=None):
    # Register command-line flags and make sure the summaries directory exists.
    define()
    if FLAGS.summaries_dir and not os.path.exists(FLAGS.summaries_dir):
        os.makedirs(FLAGS.summaries_dir)
    # Configure a named logger that writes to stdout and to a results file.
    setup_logger('log',
                 stream=True,
                 log_file=os.path.join(FLAGS.summaries_dir, "mnist_gpdnn_results.txt"))
    global logger
    logger = logging.getLogger('log')
    logger.info(FLAGS)
    # Run the MNIST tutorial experiment with the flag-configured hyperparameters.
    mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train,
                   backprop_through_attack=FLAGS.backprop_through_attack)
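Both of the main functions in this listing rely on a setup_logger helper that is not shown here. Purely as orientation, a minimal sketch of what such a helper might look like, assuming it only attaches a stream handler and an optional file handler to a named logger (the real implementation may differ):

import logging
import sys

def setup_logger(name, stream=False, log_file=None, level=logging.INFO):
    """Hypothetical sketch: configure a named logger with stream/file handlers."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    if stream:
        # Echo log records to stdout.
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    if log_file is not None:
        # Also append log records to the given results file.
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger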
Code example #2
import logging
import os

def main(argv=None):
    # Register command-line flags and make sure the summaries directory exists.
    define()
    if FLAGS.summaries_dir and not os.path.exists(FLAGS.summaries_dir):
        os.makedirs(FLAGS.summaries_dir)
    # Configure a named logger that writes to stdout and to a results file.
    setup_logger('log',
                 stream=True,
                 log_file=os.path.join(FLAGS.summaries_dir, "results.txt"))
    global logger
    logger = logging.getLogger('log')
    logger.info(FLAGS)
    # Run the black-box experiment with the flag-configured hyperparameters.
    mnist_blackbox(nb_classes=FLAGS.nb_classes,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   nb_epochs=FLAGS.nb_epochs,
                   holdout=FLAGS.holdout,
                   data_aug=FLAGS.data_aug,
                   nb_epochs_s=FLAGS.nb_epochs_s,
                   lmbda=FLAGS.lmbda)
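Neither example shows define(), which registers the command-line flags read through FLAGS. As a point of reference, a minimal sketch assuming TensorFlow 1.x's tf.app.flags API; the flag names come from the calls above, while the default values and help strings are illustrative placeholders (example #1 would additionally define clean_train and backprop_through_attack):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

def define():
    # Hypothetical sketch: flag names taken from the examples above,
    # defaults and descriptions are placeholders.
    tf.app.flags.DEFINE_string('summaries_dir', '/tmp/gpdnn', 'Directory for logs and results.')
    tf.app.flags.DEFINE_integer('nb_classes', 10, 'Number of output classes.')
    tf.app.flags.DEFINE_integer('nb_epochs', 10, 'Training epochs for the target model.')
    tf.app.flags.DEFINE_integer('nb_epochs_s', 10, 'Training epochs for the substitute model.')
    tf.app.flags.DEFINE_integer('batch_size', 128, 'Minibatch size.')
    tf.app.flags.DEFINE_integer('holdout', 150, 'Test samples held out for the substitute.')
    tf.app.flags.DEFINE_integer('data_aug', 6, 'Substitute data-augmentation iterations.')
    tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Optimiser learning rate.')
    tf.app.flags.DEFINE_float('lmbda', 0.1, 'Augmentation step-size parameter.')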
Code example #3
        # Select the next minibatch, wrapping around the end of the training set.
        indx_array = np.mod(np.arange(data_indx, data_indx + minibatch_size), x_train.shape[0])
        data_indx += minibatch_size

        # Copy the model's static feeds so the minibatch does not mutate them.
        fd = dict(gp_model.feeds or {})
        fd.update({
            phs.ximage_flat: x_train[indx_array],
            phs.label: y_train[indx_array]
        })
        # One optimisation step; the negated objective is recorded as the loss.
        _, loss_evd = tf_session.run([minimise, -gp_model.objective], feed_dict=fd)

        # Log test-set log-likelihood and accuracy every 50 steps.
        if i % 50 == 0:
            fd = dict(gp_model.feeds or {})
            fd.update({phs.ximage_flat: x_test,
                       phs.label: y_test})
            accuracy_evald, log_like_evald = tf_session.run([avg_acc, avg_ll], feed_dict=fd)
            logger.info("Iteration {}: Loss is {}. \nTest set LL {}, Acc {}".format(
                i, loss_evd, log_like_evald, accuracy_evald))
            results.append(dict(step_no=i, loss=loss_evd, test_acc=accuracy_evald, test_ll=log_like_evald))
    logger.info("Done!")

    # Save the per-step results as a DataFrame for later inspection.
    df = pd.DataFrame(results)
    df.to_pickle("gpdnn_mnist_train.pdpick")


if __name__ == '__main__':
    setup_logger('log', stream=True)
    global logger
    logger = logging.getLogger('log')
    main()
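
The training loop pickles its per-step results as a pandas DataFrame. A small usage sketch for inspecting them afterwards, assuming the path matches whatever was passed to to_pickle above:

import pandas as pd

# Load the per-step results written by df.to_pickle(...) at the end of training.
results_path = "gpdnn_mnist_train.pdpick"  # path passed to to_pickle above
df = pd.read_pickle(results_path)
print(df[["step_no", "loss", "test_ll", "test_acc"]].tail())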