Code example #1
File: train.py  Project: yunsangq/tensorflow-cifar10
# Python 2 / TensorFlow 1.x excerpt; datasets, network, and the save helper
# are project-local code defined elsewhere in the repository.
import os
import time


def run():
    test_images, test_labels = datasets.load_cifar10(is_train=False)
    train_acc = []
    train_cost = []
    test_acc = []
    test_cost = []
    save_dir = "./models/network_01"
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    for epoch in xrange(100):
        # Reload the CIFAR-10 training split at the start of each epoch.
        train_images, train_labels = datasets.load_cifar10(is_train=True)
        num_epoch = 1
        start_time = time.time()
        network.fit(train_images, train_labels)
        duration = time.time() - start_time
        examples_per_sec = (num_epoch * len(train_images)) / duration
        # Evaluate accuracy/loss on both the training and test splits.
        train_accuracy, train_loss = network.score(train_images, train_labels)
        test_accuracy, test_loss = network.score(test_images, test_labels)
        summary = {
            "epoch": epoch,
            "name": epoch,
            "train_accuracy": train_accuracy,
            "test_accuracy": test_accuracy,
            "train_loss": train_loss,
            "test_loss": test_loss,
            "examples_per_sec": examples_per_sec,
        }
        print "[%(epoch)d][%(name)s]train-acc: %(train_accuracy).3f, train-loss: %(train_loss).3f," % summary,
        print "test-acc: %(test_accuracy).3f, test-loss: %(test_loss).3f, %(examples_per_sec).1f examples/sec" % summary
        train_acc.append(train_accuracy)
        train_cost.append(train_loss)
        test_acc.append(test_accuracy)
        test_cost.append(test_loss)

        network.save(save_dir + "/model_%d.ckpt" % epoch)

    save("./json/network_01.json", train_acc, train_cost, test_acc, test_cost)
Code example #2
    # Excerpt: parser (an argparse.ArgumentParser), nw, global_dtype,
    # batch_size, and evaluate_FCDB are defined earlier in the module
    # (TensorFlow 1.x, Python 2).
    args = parser.parse_args()

    embedding_dim = args.embedding_dim
    ranking_loss = args.ranking_loss
    snapshot = args.snapshot
    net_data = np.load(args.initial_parameters).item()
    image_placeholder = tf.placeholder(dtype=global_dtype,
                                       shape=[batch_size, 227, 227, 3])
    var_dict = nw.get_variable_dict(net_data)
    SPP = args.spp
    pooling = args.pooling
    with tf.variable_scope("ranker") as scope:
        feature_vec = nw.build_alexconvnet(image_placeholder,
                                           var_dict,
                                           embedding_dim,
                                           SPP=SPP,
                                           pooling=pooling)
        score_func = nw.score(feature_vec)

    # load pre-trained model
    saver = tf.train.Saver(tf.global_variables())
    sess = tf.Session(config=tf.ConfigProto())
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, snapshot)

    print "Snapshot: {}".format(snapshot)
    start_time = time.time()
    evaluate_FCDB()
    print("--- %s seconds ---" % (time.time() - start_time))
Code example #3
    parser.add_argument("--initial_parameters", help="Path to initial parameter file", type=str, default="alexnet.npy")
    parser.add_argument("--ranking_loss", help="Type of ranking loss", type=str, choices=['ranknet', 'svm'], default='svm')
    parser.add_argument("--snapshot", help="Name of the checkpoint files", type=str, default='./snapshots/model-spp-max')
    parser.add_argument("--spp", help="Whether to use spatial pyramid pooling in the last layer or not", type=str2bool, default=True)
    parser.add_argument("--pooling", help="Which pooling function to use", type=str, choices=['max', 'avg'], default='max')

    args = parser.parse_args()

    embedding_dim = args.embedding_dim
    ranking_loss = args.ranking_loss
    snapshot = args.snapshot
    net_data = np.load(args.initial_parameters).item()
    image_placeholder = tf.placeholder(dtype=global_dtype, shape=[batch_size, 227, 227, 3])
    var_dict = nw.get_variable_dict(net_data)
    SPP = args.spp
    pooling = args.pooling
    with tf.variable_scope("ranker") as scope:
        feature_vec = nw.build_alexconvnet(image_placeholder, var_dict, embedding_dim, SPP=SPP, pooling=pooling)
        score_func = nw.score(feature_vec)

    # load pre-trained model
    saver = tf.train.Saver(tf.global_variables())
    sess = tf.Session(config=tf.ConfigProto())
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, snapshot)

    print "Snapshot: {}".format(snapshot)
    start_time = time.time()
    evaluate_FCDB()
    print("--- %s seconds ---" % (time.time() - start_time))