def setup_graph():
    # Build the inference graph: an input placeholder, the CNN forward pass,
    # a softmax over the logits, and a Saver for restoring trained weights.
    graph_params = {}
    graph_params['graph'] = tf.Graph()
    with graph_params['graph'].as_default():
        model_params = model.params()
        graph_params['target_image'] = tf.placeholder(
            tf.float32,
            shape=(1, model.IMG_HEIGHT, model.IMG_WIDTH, model.IMG_CHANNELS))
        logits = model.cnn(graph_params['target_image'],
                           model_params,
                           keep_prob=1.0)
        graph_params['pred'] = tf.nn.softmax(logits)
        graph_params['saver'] = tf.train.Saver()
    return graph_params
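
How the returned graph_params dictionary gets used is not shown here. Below is a minimal usage sketch, not part of the original code: the classify() helper name is illustrative, and the checkpoint path "models/deep_logo_model" is assumed to match the one written by the training examples further down.

def classify(test_image):
    # test_image: preprocessed float32 array of shape
    # (1, model.IMG_HEIGHT, model.IMG_WIDTH, model.IMG_CHANNELS)
    graph_params = setup_graph()
    with tf.Session(graph=graph_params['graph']) as session:
        tf.global_variables_initializer().run()
        # Restore trained weights into the graph built by setup_graph()
        graph_params['saver'].restore(session, "models/deep_logo_model")
        pred = session.run(
            graph_params['pred'],
            feed_dict={graph_params['target_image']: test_image})
        return np.argmax(pred), np.max(pred)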
Example #2
def main():
    if len(sys.argv) > 1:
        test_image_fn = sys.argv[1]
        if not os.path.exists(test_image_fn):
            print("Not found:", test_image_fn)
            sys.exit(-1)
    else:
        # Select a test image from a test directory
        test_dirs = [
            os.path.join(common.CROPPED_AUG_IMAGE_DIR, class_name, 'test')
            for class_name in common.CLASS_NAME
        ]
        test_dir = np.random.choice(test_dirs)
        test_images_fn = os.listdir(test_dir)
        test_image_fn = np.random.choice(test_images_fn, 1)[0]
        test_image_fn = os.path.join(test_dir, test_image_fn)
    print("Test image:", test_image_fn)

    # Open and resize a test image
    if common.CNN_IN_CH == 1:
        test_image_org = skimage.io.imread(test_image_fn, as_grey=True)
        test_image_org = test_image_org.reshape(
            common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH, common.CNN_IN_CH)
    else:
        test_image_org = skimage.io.imread(test_image_fn)
    if test_image_org.shape != (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
                                common.CNN_IN_CH):
        test_image_org = imresize(
            test_image_org, (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH),
            interp='bicubic')
    test_image_org = preprocess.scaling(test_image_org)
    test_image = test_image_org.reshape(
        (1, common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
         common.CNN_IN_CH)).astype(np.float32)

    # Build the inference graph
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Restore pre-learned weights from weights.npz if available
        f = "weights.npz"
        if os.path.exists(f):
            initial_weights = load_initial_weights(f)
        else:
            initial_weights = None

        if initial_weights is not None:
            assert len(initial_weights) == len(model_params)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # The test image as a constant input tensor
        tf_test_image = tf.constant(test_image)

        # model
        logits = model.cnn(tf_test_image, model_params, keep_prob=1.0)
        test_pred = tf.nn.softmax(logits)

        # Restore ops
        saver = tf.train.Saver()

    # Recognize the brand logo in the test image
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned weights')
        elif os.path.exists("models"):
            save_path = "models/deep_logo_model"
            saver.restore(session, save_path)
            print('Model restored')
        else:
            print('initialized')
        pred = session.run([test_pred])
        print("Class name:", common.CLASS_NAME[np.argmax(pred)])
        print("Probability:", np.max(pred))
Example #4
def main():
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])

        # f.files has unordered keys like ['arr_8', 'arr_9', 'arr_6', ...];
        # sort them by their numeric suffix to restore the parameter order.
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None

    # read input data
    dataset, labels = read_data()

    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Initial weights
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(tf.float32,
                                         shape=(FLAGS.batch_size,
                                                model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        # Training computation
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))
        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')

        # Add ops to save and restore all the variables
        saver = tf.train.Saver()

    # Do training
    cpu_count = 1
    config = tf.ConfigProto(intra_op_parallelism_threads=cpu_count,
                            inter_op_parallelism_threads=cpu_count,
                            allow_soft_placement=True,
                            device_count={"CPU": cpu_count})
    with tf.Session(graph=graph, config=config) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            offset = (step * FLAGS.batch_size) % (train_labels.shape[0] -
                                                  FLAGS.batch_size)
            batch_data = train_dataset[offset:(offset +
                                               FLAGS.batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                if step % 50 == 0:
                    # Fetch only the merged summaries here; re-running the
                    # optimizer would apply an extra gradient step.
                    summary = session.run(merged, feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' %
                          accuracy(predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' %
                          accuracy(valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights

        print('Test accuracy: %.1f%%' %
              accuracy(test_prediction.eval(), test_labels))

        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)
Example #5
def main():
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])

        # f.files has unordered keys like ['arr_8', 'arr_9', 'arr_6', ...];
        # sort them by their numeric suffix to restore the parameter order.
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None

    # read input data
    dataset, labels = read_data()

    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Initial weights
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(
            tf.float32, shape=(FLAGS.batch_size, model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        # Training computation
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))
        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')

        # Add ops to save and restore all the variables
        saver = tf.train.Saver()

    # Do training
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            offset = (step * FLAGS.batch_size) % (
                train_labels.shape[0] - FLAGS.batch_size)
            batch_data = train_dataset[
                offset:(offset + FLAGS.batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                if step % 50 == 0:
                    # Fetch only the merged summaries here; re-running the
                    # optimizer would apply an extra gradient step.
                    summary = session.run(merged, feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy(
                        predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' % accuracy(
                        valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights

        print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(),
                                                 test_labels))

        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)