    def test_imagenet(self, imgs_):
        num_classes = 1000
        skip_layer = []
        imgs = []

        #mean of imagenet dataset in BGR
        imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)
        #plot images
        fig = plt.figure(figsize=(15, 6))
        for i, img_ in enumerate(imgs_):
            img = cv2.imread(img_)
            imgs.append(img)
            fig.add_subplot(1, 3, i + 1)
            plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            plt.axis('off')

        #placeholder for input and dropout rate
        x = tf.placeholder(tf.float32, [1, 227, 227, 3])
        keep_prob = tf.placeholder(tf.float32)
        #create model with default config ( == no skip_layer and 1000 units in the last layer)
        model = alexnet(x,
                        keep_prob,
                        num_classes,
                        skip_layer,
                        weights_path=self.PRE_MODEL)
        #define activation of last layer as score
        score = model.fc8
        #create op to calculate softmax
        softmax = tf.nn.softmax(score)

        with tf.Session() as sess:
            # Initialize all variables
            sess.run(tf.global_variables_initializer())
            # Load the pretrained weights into the model
            model.load_initial_weights(sess)
            # Create figure handle
            fig2 = plt.figure(figsize=(15, 6))
            # Loop over all images
            for i, image in enumerate(imgs):
                # Convert image to float32 and resize to (227x227)
                img = cv2.resize(image.astype(np.float32), (227, 227))
                # Subtract the ImageNet mean
                img -= imagenet_mean
                # Reshape as needed to feed into model
                img = img.reshape((1, 227, 227, 3))
                # Run the session and calculate the class probability
                probs = sess.run(softmax, feed_dict={x: img, keep_prob: 1})
                # Get the class name of the class with the highest probability
                class_name = class_names[np.argmax(probs)]
                # Plot image with class name and prob in the title
                fig2.add_subplot(1, 3, i + 1)
                plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
                plt.title("Class: " + class_name +
                          ", probability: %.4f" % probs[0, np.argmax(probs)])
                plt.axis('off')
        plt.show()
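A hedged harness for the method above, assuming it is reachable as a plain function and that `class_names` (the ImageNet label list) is defined alongside it. `test_imagenet` only reads `self.PRE_MODEL`, so a small namespace object can stand in for the instance; the weight file and image paths are placeholders, and the 1x3 subplot grid expects exactly three images.

from types import SimpleNamespace

# Hypothetical stand-in for the owning instance; only PRE_MODEL is read.
harness = SimpleNamespace(PRE_MODEL='bvlc_alexnet.npy')  # placeholder weights path
test_imagenet(harness, ['img1.jpeg', 'img2.jpeg', 'img3.jpeg'])  # placeholder images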
Example No. 2
def test():
    global BATCH_SIZE
    BATCH_SIZE = 128
    isLoad = False

    data_train = get_data('train', BATCH_SIZE)
    # data_test = get_data('val', BATCH_SIZE)
    # data_test.reset_state()
    # generator = data_test.get_data()

    model = alexnet_model.alexnet(isLoad)
    inference(model)
Example No. 3
def _test_alexnet_graph_repr(test_case, args):
    train_data_loader = OFRecordDataLoader(
        ofrecord_root=args.ofrecord_path,
        mode="train",
        dataset_size=args.train_dataset_size,
        batch_size=args.train_batch_size,
    )

    alexnet_module = alexnet()
    alexnet_module.to(args.device)

    of_cross_entropy = flow.nn.CrossEntropyLoss()
    of_cross_entropy.to(args.device)

    of_sgd = flow.optim.SGD(
        alexnet_module.parameters(), lr=args.learning_rate, momentum=args.mom
    )

    class AlexNetGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.alexnet = alexnet_module
            self.cross_entropy = of_cross_entropy
            self.add_optimizer(of_sgd)

        def build(self, image, label):
            logits = self.alexnet(image)
            loss = self.cross_entropy(logits, label)
            loss.backward()
            return loss

    alexnet_graph = AlexNetGraph()

    print("repr(alexnet_graph) before run: \n", repr(alexnet_graph))

    # debug graph build
    alexnet_graph.debug(1)

    alexnet_module.train()
    image, label = train_data_loader()
    image = image.to(args.device)
    label = label.to(args.device)
    loss = alexnet_graph(image, label)

    print("repr(alexnet_graph) after run: \n", repr(alexnet_graph))
Example No. 4
import numpy as np
from alexnet_model import alexnet
WIDTH = 160
HEIGHT = 120
LR = 1e-3
EPOCHS = 10
MODEL_NAME = 'MY_AI_CAR.model'

model = alexnet(WIDTH, HEIGHT, LR)

train_data = np.load('training_data_final.npy', allow_pickle=True)  # object array of [image, targets] pairs

train = train_data[:-100]
test = train_data[-100:]

X = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT, 1)
Y = [i[1] for i in train]

test_x = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT, 1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y},
          n_epoch=EPOCHS,
          validation_set=({
              'input': test_x
          }, {
              'targets': test_y
          }),
          snapshot_step=500,
          show_metric=True,
          run_id=MODEL_NAME)
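The snippet defines `MODEL_NAME` but never writes the trained weights to disk; assuming `alexnet` returns a TFLearn `DNN` (the `fit` signature above matches that API), persisting the model is one extra call.

# Save the trained weights so a later run can restore them with model.load(MODEL_NAME).
model.save(MODEL_NAME)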
Example No. 5
    def test_infos_of_nodes(test_case):
        alexnet_module = alexnet()
        alexnet_graph = Graph(alexnet_module)
        if not alexnet_graph._is_compiled:
            alexnet_graph._compile(flow.rand(1, 3, 224, 224))
        graph_str = repr(alexnet_graph)

        size_where = 2
        if "cuda" in graph_str:
            size_where = 3

        p_size = re.compile(r"size=\(.*?\)", re.S)
        p_type = re.compile(r"dtype=.*?\)", re.S)
        types = ["INPUT", "PARAMETER", "BUFFER", "OUTPUT"]
        num_nodes = {}

        for t in types:
            data = re.finditer(t + ":.*", graph_str)
            cnt = 0
            for i in data:
                cnt += 1
                attrs = i.group().split(":")
                size_strs = re.findall(p_size, attrs[size_where])
                type_strs = re.findall(p_type, attrs[size_where])
                test_case.assertEqual(size_strs != [], True)
                test_case.assertEqual(type_strs != [], True)

                size_attr = size_strs[0].replace("size=", "")
                type_attr = type_strs[0].replace("dtype=", "").replace(")", "")
                if size_attr[-2] == ",":
                    size_attr = size_attr.replace(",", "")
                if type_attr[-1] == ",":
                    type_attr = type_attr.replace(",", "")
                    test_case.assertEqual(type_attr, "oneflow.float32")

                data_size = tuple(map(int, size_attr[1:-1].split(", ")))
                if cnt == 1 and t == "PARAMETER":
                    test_case.assertEqual(data_size, (64, 3, 11, 11))
                elif cnt == 15 and t == "PARAMETER":
                    test_case.assertEqual(data_size, (1000, 4096))
            num_nodes[t] = cnt

        test_case.assertEqual(num_nodes["INPUT"] != 0, True)
        test_case.assertEqual(num_nodes["BUFFER"], 0)
        test_case.assertEqual(num_nodes["PARAMETER"], 16)
        test_case.assertEqual(num_nodes["OUTPUT"] != 0, True)

        # get graph proto, if you don't _compile the graph, the _graph_proto will be None
        graph_input = re.search(r"INPUT:.*", graph_str).group().split(":")
        shape_input = tuple(
            map(
                int,
                re.findall(p_size, graph_input[size_where])[0].replace(
                    "size=", "")[1:-1].split(", "),
            ))
        if not alexnet_graph._is_compiled:
            alexnet_graph._compile(flow.rand(shape_input))
        graph_proto = alexnet_graph._graph_proto

        nodes = {}
        for op in graph_proto.net.op:
            nodes[op.name] = op

        op_names = []
        op_attrs = []
        for node_name in nodes:
            node = nodes[node_name]
            if is_user_op(node):
                op_name = node.user_conf.op_type_name
                op_attr = parse_attr(node.user_conf.attr)
                op_names.append(op_name)
                op_attrs.append(op_attr)

        test_case.assertEqual(op_names[0], "conv2d")
        test_case.assertEqual(op_names[1], "bias_add")
        test_case.assertEqual(op_names[2], "relu")

        kernel_size = op_attrs[0].get("kernel_size", None)
        strides = op_attrs[0].get("strides", None)
        padding_before = op_attrs[0].get("padding_before", None)
        test_case.assertEqual(kernel_size, (11, 11))
        test_case.assertEqual(strides, (4, 4))
        test_case.assertEqual(padding_before, (2, 2))
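`is_user_op` and `parse_attr` are helpers not shown in this snippet. A minimal `is_user_op`, under the assumption that OneFlow's `OperatorConf` proto keeps user-defined ops in a `user_conf` submessage (which the `node.user_conf.op_type_name` access above implies):

def is_user_op(op_conf):
    # User-defined ops populate the user_conf submessage of OperatorConf.
    return op_conf.HasField("user_conf")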
Example No. 6
def _test_alexnet_graph(test_case, args):
    train_data_loader = OFRecordDataLoader(
        ofrecord_root=args.ofrecord_path,
        mode="train",
        dataset_size=args.train_dataset_size,
        batch_size=args.train_batch_size,
    )
    val_data_loader = OFRecordDataLoader(
        ofrecord_root=args.ofrecord_path,
        mode="val",
        dataset_size=args.val_dataset_size,
        batch_size=args.val_batch_size,
    )

    # oneflow init
    start_t = time.time()
    alexnet_module = alexnet()
    end_t = time.time()
    print("init time : {}".format(end_t - start_t))

    alexnet_module.to(args.device)

    of_cross_entropy = flow.nn.CrossEntropyLoss()
    of_cross_entropy.to(args.device)

    of_sgd = flow.optim.SGD(
        alexnet_module.parameters(), lr=args.learning_rate, momentum=args.mom
    )

    class AlexNetGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.train_data_loader = train_data_loader
            self.alexnet = alexnet_module
            self.cross_entropy = of_cross_entropy
            self.add_optimizer(of_sgd)

        def build(self):
            image, label = self.train_data_loader()
            image = image.to(args.device)
            label = label.to(args.device)
            logits = self.alexnet(image)
            loss = self.cross_entropy(logits, label)
            loss.backward()
            return loss

    alexnet_graph = AlexNetGraph()

    class AlexNetEvalGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.val_data_loader = val_data_loader
            self.alexnet = alexnet_module

        def build(self):
            with flow.no_grad():
                image, label = self.val_data_loader()
                image = image.to(args.device)
                logits = self.alexnet(image)
                predictions = logits.softmax()
            return predictions, label

    alexnet_eval_graph = AlexNetEvalGraph()

    of_losses = []
    all_samples = len(val_data_loader) * args.val_batch_size
    print_interval = 10

    for epoch in range(args.epochs):
        alexnet_module.train()

        for b in range(len(train_data_loader)):
            # oneflow graph train
            start_t = time.time()
            loss = alexnet_graph()
            end_t = time.time()
            if b % print_interval == 0:
                l = loss.numpy()
                of_losses.append(l)
                print(
                    "epoch {} train iter {} oneflow loss {}, train time : {}".format(
                        epoch, b, l, end_t - start_t
                    )
                )
        print("epoch %d train done, start validation" % epoch)

        alexnet_module.eval()
        correct_of = 0.0
        for b in range(len(val_data_loader)):

            start_t = time.time()
            predictions, label = alexnet_eval_graph()
            of_predictions = predictions.numpy()
            clsidxs = np.argmax(of_predictions, axis=1)

            label_nd = label.numpy()
            for i in range(args.val_batch_size):
                if clsidxs[i] == label_nd[i]:
                    correct_of += 1
            end_t = time.time()

        print("epoch %d, oneflow top1 val acc: %f" % (epoch, correct_of / all_samples))
Example No. 7
    def fine_tuning(self, train_list, test_list, mean, snapshot,
                    filewriter_path):
        # Learning params
        learning_rate = 0.0005
        num_epochs = 151
        batch_size = 64

        # Network params
        in_img_size = (227, 227)  #(height, width)
        dropout_rate = 1  # fed to keep_prob: a value of 1 disables dropout
        num_classes = 2
        train_layers = ['fc7', 'fc8']

        # How often we want to write the tf.summary data to disk
        display_step = 30

        x = tf.placeholder(tf.float32,
                           [batch_size, in_img_size[0], in_img_size[1], 3])
        y = tf.placeholder(tf.float32, [None, num_classes])
        keep_prob = tf.placeholder(tf.float32)

        # Initialize model
        model = alexnet(x,
                        keep_prob,
                        num_classes,
                        train_layers,
                        in_size=in_img_size)
        #link variable to model output
        score = model.fc8
        # List of trainable variables of the layers we want to train
        var_list = [
            v for v in tf.trainable_variables()
            if v.name.split('/')[0] in train_layers
        ]
        # Op for calculating the loss
        with tf.name_scope("cross_ent"):
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=score,
                                                        labels=y))
            # Train op

            # Get gradients of all trainable variables
            gradients = tf.gradients(loss, var_list)
            gradients = list(zip(gradients, var_list))
            '''
            # Create optimizer and apply gradient descent to the trainable variables
            learning_rate = tf.train.exponential_decay(learning_rate,
                                           global_step=tf.Variable(0, trainable=False),
                                           decay_steps=10,decay_rate=0.9)
            '''
            optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
            # Restrict updates to the fc7/fc8 variables collected above
            train_op = optimizer.minimize(loss, var_list=var_list)

        # Add gradients to summary
        for gradient, var in gradients:
            tf.summary.histogram(var.name + '/gradient', gradient)
        # Add the variables we train to the summary
        for var in var_list:
            tf.summary.histogram(var.name, var)
        # Add the loss to summary
        tf.summary.scalar('cross_entropy', loss)

        # Evaluation op: Accuracy of the model
        with tf.name_scope("accuracy"):
            correct_pred = tf.equal(tf.argmax(score, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Add the accuracy to the summary
        tf.summary.scalar('accuracy', accuracy)

        # Merge all summaries together
        merged_summary = tf.summary.merge_all()
        # Initialize the FileWriter
        writer = tf.summary.FileWriter(filewriter_path)
        # Initialize a saver to store model checkpoints
        saver = tf.train.Saver()
        # Initialize the data generators separately for the training and validation set
        train_generator = ImageDataGenerator(train_list,
                                             horizontal_flip=True,
                                             shuffle=False,
                                             mean=mean,
                                             scale_size=in_img_size,
                                             nb_classes=num_classes)
        val_generator = ImageDataGenerator(test_list,
                                           shuffle=False,
                                           mean=mean,
                                           scale_size=in_img_size,
                                           nb_classes=num_classes)
        # Get the number of training/validation steps per epoch
        train_batches_per_epoch = np.floor(train_generator.data_size /
                                           batch_size).astype(np.int16)
        val_batches_per_epoch = np.floor(val_generator.data_size /
                                         batch_size).astype(np.int16)

        # Start Tensorflow session
        with tf.Session() as sess:
            # Initialize all variables
            sess.run(tf.global_variables_initializer())
            # Add the model graph to TensorBoard
            writer.add_graph(sess.graph)
            # Load the pretrained weights into the non-trainable layer
            model.load_initial_weights(sess)
            print("{} Start training...".format(datetime.now()))
            print("{} Open Tensorboard at --logdir {}".format(
                datetime.now(), filewriter_path))
            # Loop over number of epochs
            for epoch in range(num_epochs):
                print("{} Epoch number: {}/{}".format(datetime.now(),
                                                      epoch + 1, num_epochs))
                step = 1
                while step < train_batches_per_epoch:
                    # Get a batch of images and labels
                    batch_xs, batch_ys = train_generator.next_batch(batch_size)
                    # And run the training op
                    sess.run(train_op,
                             feed_dict={
                                 x: batch_xs,
                                 y: batch_ys,
                                 keep_prob: dropout_rate
                             })
                    # Generate summary with the current batch of data and write to file
                    if step % display_step == 0:
                        s = sess.run(merged_summary,
                                     feed_dict={
                                         x: batch_xs,
                                         y: batch_ys,
                                         keep_prob: 1.
                                     })
                        writer.add_summary(
                            s, epoch * train_batches_per_epoch + step)
                    step += 1

                # Validate the model on the entire validation set
                print("{} Start validation".format(datetime.now()))
                test_acc = 0.
                test_count = 0
                for _ in range(val_batches_per_epoch):
                    batch_tx, batch_ty = val_generator.next_batch(batch_size)
                    acc = sess.run(accuracy,
                                   feed_dict={
                                       x: batch_tx,
                                       y: batch_ty,
                                       keep_prob: 1.
                                   })
                    test_acc += acc
                    test_count += 1
                test_acc /= test_count
                print("{} Validation Accuracy = {:.4f}".format(
                    datetime.now(), test_acc))

                # Reset the file pointer of the image data generator
                val_generator.reset_pointer()
                train_generator.reset_pointer()
                print("{} Saving checkpoint of model...".format(
                    datetime.now()))

                #save checkpoint of the model
                if epoch % display_step == 0:
                    checkpoint_name = os.path.join(
                        snapshot, 'model_epoch' + str(epoch) + '.ckpt')
                    save_path = saver.save(sess, checkpoint_name)
                    print("{} Model checkpoint saved at {}".format(
                        datetime.now(), checkpoint_name))
    def predict_batch(self, val_list, mean, weight_file, result_file):
        in_img_size = (227, 227)  #(height, width)
        dropout_rate = 0.5
        num_classes = 2
        train_layers = []

        x = tf.placeholder(tf.float32, [1, in_img_size[0], in_img_size[1], 3])
        y = tf.placeholder(tf.float32, [None, num_classes])

        model = alexnet(x,
                        1.,
                        num_classes,
                        train_layers,
                        in_size=in_img_size,
                        weights_path=weight_file)
        score = model.fc8
        softmax = tf.nn.softmax(score)

        val_generator = ImageDataGenerator(val_list,
                                           horizontal_flip=False,
                                           shuffle=False,
                                           mean=mean,
                                           scale_size=in_img_size,
                                           nb_classes=num_classes)

        precision = np.zeros((num_classes + 1, num_classes), dtype=np.float64)
        total_precision = 0.

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            tf.train.Saver().restore(sess, weight_file)

            self._start_end_time[0] = time.perf_counter()
            for index in range(val_generator.data_size):
                print('handling %d / %d ...\r' % (index + 1,
                                                  val_generator.data_size),
                      end='', flush=True)

                img_ = val_generator.images[index]
                label = val_generator.labels[index]
                img = cv2.imread(img_)
                img = cv2.resize(
                    img,
                    (val_generator.scale_size[1], val_generator.scale_size[0]))
                img = img.reshape(1, val_generator.scale_size[0],
                                  val_generator.scale_size[1], 3)
                img = img.astype(np.float32)

                probs = sess.run(softmax, feed_dict={x: img})
                guess = np.argmax(probs)
                if guess == label:
                    precision[guess][guess] += 1
                    total_precision += 1
                else:
                    precision[guess][int(val_generator.labels[index])] += 1
            self._start_end_time[1] = time.perf_counter()

            for i in range(num_classes):
                for j in range(num_classes):
                    precision[num_classes][i] += precision[j][i]
            for i in range(num_classes):
                for j in range(num_classes):
                    precision[i][j] /= precision[num_classes][j]
            total_precision /= val_generator.data_size

            elapsed = (self._start_end_time[1] -
                       self._start_end_time[0]) / val_generator.data_size

            f = open(result_file, 'w')
            f.write('model: ' + weight_file + '\n')
            print('\n#####################################################################')
            f.write('################################################################\n')
            text_ = ''
            for i in range(num_classes):
                print('        %d' % i, end=' ')
                text_ += '        %d' % i
            print('\n')
            f.write(text_ + '\n')
            for i in range(num_classes):
                print('  %d' % i, end=' ')
                f.write('  ' + str(i))
                for j in range(num_classes):
                    str_preci = '    %.2f' % precision[i][j]
                    print('  %.2f  ' % precision[i][j], end=' ')
                    f.write(str_preci)
                print('\n')
                f.write('\n')
            print('\ntotal precision: %.2f' % total_precision)
            print('average speed: %.4f s / image' % elapsed)
            f.write('\ntotal precision: %.2f\n' % total_precision)
            f.write('average speed: %.4f s / image\n' % elapsed)
            f.close()
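The two nested loops that normalize `precision` first accumulate per-column (true-class) counts in the extra last row, then divide each column by its sum. A vectorized numpy equivalent of that post-processing step:

# Last row stores column sums; each column is then scaled by its sum,
# matching the nested-loop version above.
col_sums = precision[:num_classes].sum(axis=0)
precision[num_classes] = col_sums
precision[:num_classes] /= col_sums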
Example No. 8
import tensorflow as tf
import tflearn.datasets.oxflower17 as oxflowerData
from alexnet_model import alexnet
import matplotlib.pyplot as plt

# Download data
(data, labels) = oxflowerData.load_data(one_hot=True)

# Build AlexNet model
nn_model = alexnet(data.shape[1:], labels.shape[1])

# Compile model
nn_model.compile(optimizer=tf.train.AdamOptimizer(),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])

# Train model (20% data used for validation)
history = nn_model.fit(data,
                       labels,
                       batch_size=64,
                       epochs=100,
                       verbose=1,
                       validation_split=0.2,
                       shuffle=True)

# plot training & validation results
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
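A minimal plotting sketch for these four curves, using the matplotlib import already present; the figure layout is illustrative only.

epochs_range = range(1, len(acc) + 1)

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='train acc')
plt.plot(epochs_range, val_acc, label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='train loss')
plt.plot(epochs_range, val_loss, label='val loss')
plt.xlabel('epoch')
plt.legend()
plt.show()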