Example #1
import numpy as np
import tensorflow as tf  # TF 1.x API (tf.Session, tf.train.Saver)

# Project-local helpers assumed in scope: ImageModel, load_tiny_imagenet.
def main():
    print('Building model...')
    model = ImageModel()
    print('Loading Tiny ImageNet...')
    data = load_tiny_imagenet("./data")
    X_train = data['X_train']
    y_train = data['y_train']
    X_val = data['X_val']
    y_val = data['y_val']
    X_test = data['X_test']
    class_names = data['class_names']
    test_files = data['test_files']
    # y_test = data['y_test']

    # print('Test labels shape: ', y_test.shape)
    # Permute the data axes from NCHW (channels-first) to NHWC (channels-last)
    X_train = np.transpose(X_train, (0, 2, 3, 1))
    X_val = np.transpose(X_val, (0, 2, 3, 1))
    X_test = np.transpose(X_test, (0, 2, 3, 1))

    print('Train data shape: ', X_train.shape)
    print('Train labels shape: ', y_train.shape)
    print('Validation data shape: ', X_val.shape)
    print('Validation labels shape: ', y_val.shape)
    print('Test data shape: ', X_test.shape)
    print('Number of classes: ', len(class_names))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    cp_saver = tf.train.Saver(tf.trainable_variables())
    # cp_saver = tf.train.Saver()
    cp_saver.restore(sess, "ckpts/v_simple/model.ckpt-10")
    # print('Training')
    # # logs_path = "tensorboard/" + strftime("%Y_%m_%d_%H_%M_%S", gmtime())
    # # train_writer = tf.summary.FileWriter(logs_path + '/train', session.graph)
    # model.run(sess, X_train, y_train, True, epochs=10)
    # print('Validation')
    # model.run(sess, X_val, y_val, False, epochs=1)
    # # run_model(sess,X_val,y_val)
    model.run_with_valid(sess,
                         X_train,
                         y_train,
                         X_val,
                         y_val,
                         epochs=30,
                         batch_size=32)
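
A note on the checkpoint restore in this example: tf.train.Saver.restore expects files previously written by a matching Saver. A minimal sketch of the save side that would have produced the "model.ckpt-10" files (the step number 10 is an assumption read off the filename suffix):

# Hypothetical save side; writes ckpts/v_simple/model.ckpt-10.*
cp_saver = tf.train.Saver(tf.trainable_variables())
cp_saver.save(sess, "ckpts/v_simple/model.ckpt", global_step=10)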
Example #2
import numpy as np

# Assumes project-local loaders: generate_dataset_cifar10, load_tiny_imagenet.
def load_dataset(dataset):
    if dataset == "cifar10":
        return generate_dataset_cifar10("../datasets/cifar-10-batches-py")
    elif dataset == "tiny200":
        class_names, X_train, y_train, X_val, y_val = load_tiny_imagenet(
            path="../datasets/tiny-imagenet-200/",
            wnids_path="",
            resize='False',
            num_classes=200,
            dtype=np.float32)

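        # One-hot encode the integer labels via NumPy fancy indexing
        # (see the standalone demo after this example)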
        N = X_train.shape[0]
        Y_train = np.zeros((N, 200))
        Y_train[np.arange(N), y_train] = 1

        N = X_val.shape[0]
        Y_val = np.zeros((N, 200))
        Y_val[np.arange(N), y_val] = 1

        return X_train, Y_train, X_val, Y_val
    else:
        # Fail loudly instead of silently returning None for unknown names
        raise ValueError('Unknown dataset: %s' % dataset)
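
The Y[np.arange(N), y] = 1 pattern above builds one-hot rows with NumPy fancy indexing; a small self-contained demonstration:

import numpy as np

y = np.array([2, 0, 1])            # integer class labels
Y = np.zeros((y.shape[0], 3))      # one row per sample, one column per class
Y[np.arange(y.shape[0]), y] = 1    # place a single 1 per row at the label index
print(Y)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]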
Example #3
def process_images(wnids_path='', resize='False', num_classes=200, normalize='False'):
	# Path to tiny imagenet dataset
	if wnids_path == '':
		wnids_path = input('Enter the relative path to the directory containing the wnids/words files from sets/: ')
	wnids_path = os.path.join('..', 'sets', wnids_path)
	
	# Generate data fields - test data has no labels so ignore it
	classes, x_train, y_train, x_val, y_val = load_tiny_imagenet(
		'tiny-imagenet-200',
		wnids_path,
		num_classes=num_classes,
		resize=resize)
	
	# Format data to be the correct shape
	x_train = np.einsum('iljk->ijkl', x_train)
	x_val = np.einsum('iljk->ijkl', x_val)

	if normalize.lower() == 'true':
		# Divide out of place in case the loader returned integer arrays
		x_train = x_train / 255.0
		x_val = x_val / 255.0

	# Convert labels to one hot vectors
	y_train = keras.utils.to_categorical(y_train, num_classes)
	y_val = keras.utils.to_categorical(y_val, num_classes)

	return x_train, y_train, x_val, y_val, wnids_path
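
The np.einsum('iljk->ijkl', ...) calls above are pure axis permutations from channels-first (N, C, H, W) to channels-last (N, H, W, C), equivalent to the np.transpose calls in Example #1. A quick check:

import numpy as np

x = np.random.rand(2, 3, 8, 8)        # (N, C, H, W)
a = np.einsum('iljk->ijkl', x)        # label axes i,l,j,k and emit i,j,k,l
b = np.transpose(x, (0, 2, 3, 1))     # the same permutation, spelled directly
assert a.shape == (2, 8, 8, 3)
assert np.array_equal(a, b)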
Example #4
def process_images(num_classes=10):
    # Path to tiny imagenet dataset
    #path = input('Enter the relative path to the directory containing the wnids/words files: ')
    path = os.path.join('tiny-imagenet-200')
    #path = os.path.join('tiny-imagenet-200', 'random', '0')
    print(path)
    # Generate data fields - test data has no labels so ignore it
    classes, x_train, y_train, x_val, y_val = load_tiny_imagenet(
        path,
        os.path.join('random', '0'),
        num_classes=num_classes,
        resize=True)
    # Get number of classes specified in order from [0, num_classes)
    print(classes)
    #print(x_train)
    print(x_train.shape)
    print(y_train.shape)
    """
	if num_classes > 200:
		print('Set number of classes to maximum of 200\n')
		num_classes = 200
	elif num_classes != 200:
		train_indices = [index for index, label in enumerate(y_train) if label < num_classes]
		val_indices = [index for index, label in enumerate(y_val) if label < num_classes]
		x_train = x_train[train_indices]
		y_train = y_train[train_indices]
		x_val = x_val[val_indices]
		y_val = y_val[val_indices]
	"""

    # Format data to be the correct shape
    x_train = np.einsum('iljk->ijkl', x_train)
    x_val = np.einsum('iljk->ijkl', x_val)

    # Convert labels to one hot vectors
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_val = keras.utils.to_categorical(y_val, num_classes)

    return x_train, y_train, x_val, y_val
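
keras.utils.to_categorical, used in Examples #3 and #4, produces the same one-hot layout as the manual NumPy indexing in Example #2:

import numpy as np
from tensorflow import keras  # the originals appear to use standalone keras

print(keras.utils.to_categorical(np.array([2, 0, 1]), num_classes=3))
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]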
Example #5
def main(argv):
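    # Assumes TF 1.x plus project-local helpers: load_tiny_imagenet, DataSet,
    # mnist_inference, mnist_training, and a FLAGS configuration object.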
    #%% load data
    data = load_tiny_imagenet('datasets/tiny-imagenet-100-A',
                              subtract_mean=True)
    print(data['X_train'].shape)

    mnist_graph = tf.Graph()
    with mnist_graph.as_default():
        # Generate placeholders for the images and labels.
        images_placeholder = tf.placeholder(tf.float32)
        labels_placeholder = tf.placeholder(tf.int32)
        tf.add_to_collection("images", images_placeholder)  # Remember this Op.
        tf.add_to_collection("labels", labels_placeholder)  # Remember this Op.

        # Build a Graph that computes predictions from the inference model.
        logits, keep_prob = mnist_inference(images_placeholder,
                                            FLAGS.HIDDEN1_UNITS,
                                            FLAGS.HIDDEN2_UNITS)
        tf.add_to_collection("logits", logits)  # Remember this Op.

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op, loss_op = mnist_training(logits, labels_placeholder,
                                           FLAGS.lr)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        tf.summary.scalar("Cost", loss_op)
        # Uncomment the following line to see what we have constructed.
        # tf.train.write_graph(tf.get_default_graph().as_graph_def(),
        #                      "/tmp", "complete.pbtxt", as_text=True)

    with tf.Session(graph=mnist_graph) as sess:
        # Merge all the tf.summary
        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('model', sess.graph)

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        train_ds = DataSet(data['X_train'], data['y_train'])
        max_steps = int(FLAGS.MAX_STEPS)
        for step in range(max_steps):
            images, labels = train_ds.next_batch(256)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss, summary = sess.run(
                [train_op, loss_op, summary_op],
                feed_dict={
                    images_placeholder: images,
                    labels_placeholder: labels,
                    keep_prob: 1.0
                })
            train_writer.add_summary(summary, step)
            if step % 100 == 0:  # Record execution stats
                print('Step %d/%d -- loss: %f' % (step, max_steps, loss))

        # Write a checkpoint.
        train_writer.close()
        # Use a distinct basename; TF writes its own "checkpoint" state file
        # into the save directory, which makes that name confusing to reuse.
        checkpoint_file = os.path.join(FLAGS.MODEL_SAVE_PATH, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
    print(train_ds.epoch_completed)
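
The DataSet batching helper used above is project-local and not shown. A minimal sketch of the interface the training loop relies on (next_batch and the epoch_completed counter), assuming it shuffles and cycles through the training set:

import numpy as np

class DataSet(object):
    def __init__(self, images, labels):
        self._images, self._labels = images, labels
        self._num = images.shape[0]
        self._index = 0
        self.epoch_completed = 0

    def next_batch(self, batch_size):
        # Reshuffle and bump the epoch counter when the data is exhausted
        if self._index + batch_size > self._num:
            perm = np.random.permutation(self._num)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            self._index = 0
            self.epoch_completed += 1
        start = self._index
        self._index += batch_size
        return self._images[start:self._index], self._labels[start:self._index]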
Example #6
import os
import sys

import numpy as np
import matplotlib.pyplot as plt

# Project-local helpers assumed below: load_tiny_imagenet, PretrainedCNN,
# and deprocess_image (e.g. from an image_utils module).
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(CURRENT_DIR, 'helper'))  # portable path join
from data_utils import load_tiny_imagenet

##from image_utils import image_from_url

##
sys.path.insert(0, os.path.join(CURRENT_DIR, 'classifier'))
from pretrained_cnn import PretrainedCNN

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

data = load_tiny_imagenet('datasets/tiny-imagenet-100-A', subtract_mean=True)

# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5

class_idxs = np.random.choice(len(data['class_names']),
                              size=classes_to_show,
                              replace=False)
for i, class_idx in enumerate(class_idxs):
    train_idxs, = np.nonzero(data['y_train'] == class_idx)
    train_idxs = np.random.choice(train_idxs,
                                  size=examples_per_class,
                                  replace=False)
    for j, train_idx in enumerate(train_idxs):
        img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
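        # The snippet ends here in the original. A plausible completion that
        # plots the sampled grid (columns = classes, rows = examples) is:
        plt.subplot(examples_per_class, classes_to_show,
                    1 + i + classes_to_show * j)
        if j == 0:
            plt.title(data['class_names'][class_idx][0])
        plt.imshow(img.astype('uint8'))
        plt.gca().axis('off')
plt.show()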