Example #1
def main():
    parser = argparse.ArgumentParser(description='Evaluate CaffeNet')
    parser.add_argument('--batch-size', type=int, default=20,
                        help='input batch size for training')
    parser.add_argument('--data-dir', type=str, default='./data/VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()

    test_images, test_labels, test_weights = util.load_pascal(args.data_dir,
                                                              class_names=CLASS_NAMES,
                                                              split='test')


    test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels, test_weights))
    test_dataset = test_dataset.map(center_crop_test_data)
    test_dataset = test_dataset.batch(args.batch_size)

    model = CaffeNet(num_classes=len(CLASS_NAMES))

    checkpoint = tf.train.Checkpoint(model=model)
    status = checkpoint.restore(tf.train.latest_checkpoint('pascal_caffenet'))

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(
        test_labels, np.random.random(test_labels.shape),
        test_weights, average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels, test_labels, test_weights, average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
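Example #1 maps a `center_crop_test_data` helper over the test set before batching, but the helper itself is not shown. A minimal sketch of what it could look like, assuming 256x256 inputs cropped to the 224x224 resolution the model expects (the crop size is an assumption):

import tensorflow as tf

def center_crop_test_data(image, label, weight):
    # Hypothetical helper: deterministic center crop for evaluation;
    # labels and per-class weights pass through unchanged.
    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    return image, label, weight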
Example #2
def test(model, dataset):
    test_loss = tfe.metrics.Mean()
    accuracy = []
    for batch, (images, labels, weights) in enumerate(dataset):
        images, labels, weights = center_crop(images, labels, weights)
        logits = model(images)
        loss_value = tf.losses.sigmoid_cross_entropy(labels, logits, weights)
        prediction = tf.math.sigmoid(logits)
        # Skip batches where every predicted probability is exactly zero.
        if np.sum(prediction.numpy()) != 0:
            accuracy.append(
                util.compute_ap(labels.numpy(),
                                prediction.numpy(),
                                weights.numpy(),
                                average=None))
            test_loss(loss_value)
        # print(batch)
        # print(np.sum(prediction.numpy()))
    accuracy_mean = np.nanmean(accuracy)
    return test_loss.result(), accuracy_mean
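`util.compute_ap` is called here (and in most of the other examples) but never defined in these snippets. A plausible sketch, assuming it computes per-class average precision with scikit-learn and uses the weights to mask out invalid ("difficult") samples; the real helper's exact behavior may differ:

import numpy as np
import sklearn.metrics

def compute_ap(gt, pred, valid, average=None):
    # Hypothetical per-class AP over the samples marked valid for each class.
    n_classes = gt.shape[1]
    ap = np.zeros(n_classes)
    for c in range(n_classes):
        mask = valid[:, c] > 0
        ap[c] = sklearn.metrics.average_precision_score(gt[mask, c], pred[mask, c])
    return ap if average is None else np.mean(ap)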
Example #3
def main():
    parser = argparse.ArgumentParser(description='VGG Fine Tune')
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=60,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=60,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='tb',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./data/VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()

    train_images, train_labels, train_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='trainval')
    test_images, test_labels, test_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='test')

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    train_dataset = train_dataset.map(augment_train_data)
    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)

    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))
    test_dataset = test_dataset.map(center_crop_test_data)
    test_dataset = test_dataset.batch(args.batch_size)

    model = VGG(num_classes=len(CLASS_NAMES))

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)
    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()

    tf.contrib.summary.initialize()

    global_step = tf.train.get_or_create_global_step()

    train_log = {'iter': [], 'loss': [], 'accuracy': []}
    test_log = {'iter': [], 'loss': [], 'accuracy': []}

    ckpt_dir = 'pascal_vgg_ft'
    ckpt_prefix = os.path.join(ckpt_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    # Build model first to load weights
    input_shape = tf.TensorShape([None, 224, 224, 3])
    model.build(input_shape)

    model.load_weights('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                       by_name=True)

    # Print layer names in saved weights
    # f = h5py.File('vgg16_weights_tf_dim_ordering_tf_kernels.h5', 'r')

    # # Get the data
    # for i in list(f.keys()):
    # print(i)

    decayed_lr = tf.train.exponential_decay(args.lr,
                                            global_step,
                                            1000,
                                            0.5,
                                            staircase=True)
    optimizer = tf.train.MomentumOptimizer(learning_rate=decayed_lr(),
                                           momentum=0.9)

    root = tf.train.Checkpoint(optimizer=optimizer, model=model)

    for ep in range(args.epochs):
        epoch_loss_avg = tfe.metrics.Mean()
        for batch, (images, labels, weights) in enumerate(train_dataset):
            loss_value, grads = util.cal_grad(
                model,
                loss_func=tf.losses.sigmoid_cross_entropy,
                inputs=images,
                targets=labels,
                weights=weights)

            grads_and_vars = zip(grads, model.trainable_variables)
            optimizer.apply_gradients(grads_and_vars, global_step)

            epoch_loss_avg(loss_value)
            if global_step.numpy() % args.log_interval == 0:
                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Training Loss:{3:.4f}'
                    .format(ep, args.epochs, global_step.numpy(),
                            epoch_loss_avg.result()))
                train_log['iter'].append(global_step.numpy())
                train_log['loss'].append(epoch_loss_avg.result())

                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('Training Loss', loss_value)
                    tf.contrib.summary.image('RGB', images)
                    tf.contrib.summary.scalar('LR', decayed_lr())

                    for i, variable in enumerate(model.trainable_variables):
                        tf.contrib.summary.histogram("grad_" + variable.name,
                                                     grads[i])

            if global_step.numpy() % args.eval_interval == 0:
                test_AP, test_mAP = util.eval_dataset_map(model, test_dataset)
                test_loss = test(model, test_dataset)
                print("mAP: ", test_mAP)
                print("Test Loss: ", test_loss)
                # print("Loss: %.4f, Acc: %.4f, mAP: %.4f", test_lotest_mAP)
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('Test mAP', test_mAP)
                    tf.contrib.summary.scalar('Test Loss', test_loss)

        if ep % 2 == 0:
            root.save(ckpt_prefix)

    root.save(ckpt_prefix)
    model.summary()

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
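`util.eval_dataset_map` reduces predictions over the whole test dataset to per-class AP and mAP. A minimal sketch, under the assumption that the model returns logits and that the `compute_ap` helper sketched after Example #2 is available:

import numpy as np
import tensorflow as tf

def eval_dataset_map(model, dataset):
    # Hypothetical evaluation loop: collect sigmoid scores and labels for every batch.
    all_labels, all_scores, all_weights = [], [], []
    for images, labels, weights in dataset:
        scores = tf.math.sigmoid(model(images))
        all_labels.append(labels.numpy())
        all_scores.append(scores.numpy())
        all_weights.append(weights.numpy())
    gt = np.concatenate(all_labels)
    pred = np.concatenate(all_scores)
    valid = np.concatenate(all_weights)
    AP = compute_ap(gt, pred, valid, average=None)
    return AP, np.mean(AP)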
Example #4
def main():
    parser = argparse.ArgumentParser(description='TensorFlow Pascal Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=20,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='tb',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()
    img_save_interval = 200

    train_images, train_labels, train_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='trainval')
    test_images, test_labels, test_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='test')

    ## TODO modify the following code to apply data augmentation here
    ori_h = train_images.shape[1]
    ori_w = train_images.shape[2]
    crop_h = 224
    crop_w = 224
    central_fraction = 0.7

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))

    train_dataset_aug_flip = train_dataset.map(
        lambda img, l, w: (tf.image.random_flip_left_right(img), l, w))
    train_dataset_aug_crop = train_dataset_aug_flip.map(
        lambda img, l, w: (tf.random_crop(img, [crop_h, crop_w, 3]), l, w))

    # Dataset.concatenate returns a new dataset, so keep the result.
    train_dataset = train_dataset.concatenate(train_dataset_aug_flip)

    test_dataset_aug = test_dataset.map(
        lambda img, l, w: (tf.image.central_crop(img, central_fraction), l, w))
    test_dataset_aug = test_dataset_aug.map(
        lambda img, l, w: (tf.image.resize_images(img, (ori_h, ori_w)), l, w))

    test_dataset = test_dataset.concatenate(test_dataset_aug)

    train_dataset = train_dataset.map(lambda img, l, w:
                                      (img_mean_substract(img), l, w))
    test_dataset = test_dataset.map(lambda img, l, w:
                                    (img_mean_substract(img), l, w))

    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)
    test_dataset = test_dataset.batch(args.batch_size)

    model = SimpleCNN(num_classes=len(CLASS_NAMES))

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))

    checkpoint_dir = os.path.join(logdir, "ckpt")

    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)
    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()

    ## TODO write the training and testing code for multi-label classification
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(args.lr,
                                               global_step,
                                               5000,
                                               0.5,
                                               staircase=True)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=0.9)
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    train_log = {'iter': [], 'loss': [], 'accuracy': []}
    test_log = {'iter': [], 'loss': [], 'accuracy': []}

    for ep in range(args.epochs):
        epoch_loss_avg = tfe.metrics.Mean()

        for batch, (images, labels, weights) in enumerate(train_dataset):

            loss_value, grads = util.cal_grad(
                model,
                loss_func=tf.losses.sigmoid_cross_entropy,
                inputs=images,
                weights=weights,
                targets=labels)
            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step)
            epoch_loss_avg(loss_value)

            if global_step.numpy() % args.log_interval == 0:

                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Training Loss:{3:.4f}  '
                    .format(ep, args.epochs, global_step.numpy(),
                            epoch_loss_avg.result()))
                train_log['iter'].append(global_step.numpy())
                train_log['loss'].append(epoch_loss_avg.result())

                # Tensorboard Visualization
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('training_loss',
                                              epoch_loss_avg.result())
                    #tf.contrib.summary.scalar('learning_rate', learning_rate())
                    # for grad,var in zip(grads,model.trainable_variables):
                    #     tf.contrib.summary.histogram("gradients_{0}".format(var.name), grad)

            if global_step.numpy() % args.eval_interval == 0:
                with tf.contrib.summary.always_record_summaries():
                    test_AP, test_mAP = util.eval_dataset_map(
                        model, test_dataset)
                    tf.contrib.summary.scalar('test_map', test_mAP)
                    #test_loss = test(test_dataset,model)
                    #tf.contrib.summary.scalar('testing_loss', test_loss)

            # if global_step.numpy() % img_save_interval == 0:
            #     with tf.contrib.summary.always_record_summaries():
            #         tf.contrib.summary.image('training_img', images)

        # Save checkpoints
        checkpoint.save(file_prefix=checkpoint_dir)

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    # For visualization

    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
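The `img_mean_substract` helper mapped over both datasets above is not shown. A minimal sketch, assuming per-channel subtraction of the VGG RGB means scaled the same way as in a later example that divides them by 256 (the scaling convention is an assumption):

import tensorflow as tf

def img_mean_substract(image):
    # Hypothetical per-channel mean subtraction; the constant assumes images in [0, 1].
    rgb_mean = tf.constant([123.68, 116.78, 103.94], dtype=tf.float32) / 256.0
    return tf.cast(image, tf.float32) - rgb_mean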
Example #5
def test_all(model,
             valid_path,
             epoch,
             batch_size,
             num_classes=11,
             n_cpu=8,
             iou_thres=0.5,
             img_size=512,
             cuda=True):
    # Get dataloader
    # when training the data without multi-scale
    dataset = ListDatasetraw(valid_path, img_size)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=n_cpu)

    # when training the data with multi-scale
    # transform_test = my_transform(image_size=(1200, 1920))
    # dataset = ListDataset(valid_path, img_size, transform=transform_test.image_transforms_original_image(),
    #                       target_transform=transform_test.target_transform_org)
    # dataloader = torch.utils.data.DataLoader(dataset,
    #                                          batch_size=batch_size, shuffle=False, num_workers=n_cpu)

    # get hyper-params
    conf_thresh = 0.9
    NMS_thresh = 0.1
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    file_name = './test_log.txt'
    f = open(file_name, 'a')
    f.write('Epoch%d Compute mAP...\n' % (epoch))
    f.write('IoU threshold: %.4f\n' % (iou_thres))
    print('Compute mAP...')

    targets = None
    APs = []
    for batch_i, (_, imgs, targets) in enumerate(dataloader):

        targets = targets.type(Tensor)
        imgs = imgs.type(Tensor)

        with torch.no_grad():
            output = model(imgs, None)
            output = non_max_suppression(output,
                                         num_classes,
                                         conf_thres=conf_thresh,
                                         nms_thres=NMS_thresh,
                                         cls_dependent=False)

        # Compute average precision for each sample
        for sample_i in range(targets.size(0)):
            correct = []

            # Get labels for sample where width is not zero (dummies)
            annotations = targets[sample_i, targets[sample_i, :, 3] != 0]
            # Extract detections
            detections = output[sample_i]

            if detections is None:
                if annotations.size(0) == 0:
                    APs.append(1)
                    print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                          (len(APs), len(dataset), 1, np.mean(APs)))
                # If there are no detections but there are annotations mask as zero AP
                if annotations.size(0) != 0:
                    APs.append(0)
                    print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                          (len(APs), len(dataset), 0, np.mean(APs)))
                continue

            if detections.size(0) == 0:
                if annotations.size(0) == 0:
                    APs.append(1)
                    print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                          (len(APs), len(dataset), 1, np.mean(APs)))
                # If there are no detections but there are annotations mask as zero AP
                if annotations.size(0) != 0:
                    APs.append(0)
                    print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                          (len(APs), len(dataset), 0, np.mean(APs)))
                continue

            # Get detections sorted by decreasing confidence scores
            detections = detections[np.argsort(-detections[:, 4])]

            # If no annotations add number of detections as incorrect
            if annotations.size(0) == 0:
                correct.extend([0 for _ in range(len(detections))])
            else:
                # Extract target boxes as (x1, y1, x2, y2)
                target_boxes = torch.FloatTensor(annotations[:, 1:].shape)
                target_boxes[:,
                             0] = (annotations[:, 1] - annotations[:, 3] / 2)
                target_boxes[:,
                             1] = (annotations[:, 2] - annotations[:, 4] / 2)
                target_boxes[:,
                             2] = (annotations[:, 1] + annotations[:, 3] / 2)
                target_boxes[:,
                             3] = (annotations[:, 2] + annotations[:, 4] / 2)
                target_boxes *= img_size

                detected = []
                for *pred_bbox, conf, obj_conf, obj_pred in detections:

                    pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1)
                    # Compute iou with target boxes
                    iou = bbox_ious(pred_bbox, target_boxes)
                    # Extract index of largest overlap
                    best_i = np.argmax(iou)
                    # If overlap exceeds threshold and classification is correct mark as correct
                    if iou[best_i] > iou_thres and obj_pred == annotations[
                            best_i, 0] and best_i not in detected:
                        correct.append(1)
                        detected.append(best_i)
                    else:
                        correct.append(0)

            # Extract true and false positives
            true_positives = np.array(correct)
            false_positives = 1 - true_positives

            # Compute cumulative false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # Compute recall and precision at all ranks
            recall = true_positives / annotations.size(0) if annotations.size(
                0) else true_positives
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)

            # Compute average precision
            AP = compute_ap(recall, precision)
            APs.append(AP)
            #            f.write("+ Sample [%d/%d] AP: %.4f (%.4f)" % (len(APs), len(dataset), AP, np.mean(APs)))
            #            f.write('\n')
            print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                  (len(APs), len(dataset), AP, np.mean(APs)))
    f.write("Mean Average Precision: %.4f" % np.mean(APs))
    f.write('\n')
    f.close()
    print("Mean Average Precision: %.4f" % np.mean(APs))
Example #6
    def evaluate(self,
                 test_imgs,
                 iou_threshold=0.3,
                 obj_threshold=0.2,
                 nms_threshold=0.3):

        print(
            'evaluating the model with iou_threshold={}, obj_threshold={}, nms_threshold={}'
            .format(iou_threshold, obj_threshold, nms_threshold))
        self.Yolo.train(False)

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': 1,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        # evaluation has to be in the eval stage

        generator = data_generator(test_imgs,
                                   generator_config,
                                   norm=self.Yolo.normalize,
                                   jitter=False)

        # gather all detections and annotations
        all_detections = [[None for i in range(generator.num_classes())]
                          for j in range(len(generator))]
        all_annotations = [[None for i in range(generator.num_classes())]
                           for j in range(len(generator))]

        for i in tqdm(range(len(generator))):
            raw_image = generator.load_image(i)
            raw_height, raw_width, raw_channels = raw_image.shape

            # make the boxes and the labels
            pred_boxes = self.predict(raw_image, obj_threshold, nms_threshold)

            if i < 40:
                image_bbox = draw_boxes_object(raw_image, pred_boxes,
                                               self.labels)
                cv2.imwrite(
                    './sample/image_pred_box/test_pred_{}.png'.format(i),
                    image_bbox)

            score = np.array([box.score for box in pred_boxes])
            pred_labels = np.array([box.label for box in pred_boxes])

            if len(pred_boxes) > 0:
                pred_boxes = np.array([[
                    box.xmin * raw_width, box.ymin * raw_height,
                    box.xmax * raw_width, box.ymax * raw_height, box.score
                ] for box in pred_boxes])
            else:
                pred_boxes = np.array([[]])

            # sort the boxes and the labels according to scores
            score_sort = np.argsort(-score)
            pred_labels = pred_labels[score_sort]
            pred_boxes = pred_boxes[score_sort]

            # copy detections to all_detections
            for label in range(generator.num_classes()):
                all_detections[i][label] = pred_boxes[pred_labels == label, :]

            annotations = generator.load_annotation(i)

            # copy detections to all_annotations
            for label in range(generator.num_classes()):
                all_annotations[i][label] = annotations[annotations[:, 4] ==
                                                        label, :4].copy()

        # compute mAP by comparing all detections and all annotations
        average_precisions = {}

        for label in range(generator.num_classes()):
            false_positives = np.zeros((0, ))
            true_positives = np.zeros((0, ))
            scores = np.zeros((0, ))
            num_annotations = 0.0

            for i in range(len(generator)):
                detections = all_detections[i][label]
                annotations = all_annotations[i][label]
                num_annotations += annotations.shape[0]
                detected_annotations = []

                for d in detections:
                    scores = np.append(scores, d[4])

                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue

                    overlaps = compute_overlap(np.expand_dims(d, axis=0),
                                               annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]

                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)

            # no annotations -> AP for this class is 0 (is this correct?)
            if num_annotations == 0:
                average_precisions[label] = 0
                continue

            # sort by score
            indices = np.argsort(-scores)
            false_positives = false_positives[indices]
            true_positives = true_positives[indices]

            # compute false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # compute recall and precision
            recall = true_positives / num_annotations
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)

            # compute average precision
            average_precision = compute_ap(recall, precision)
            average_precisions[label] = average_precision

        # for label, average_precision in average_precisions.items():
        #     print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))

        return average_precisions
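Both `test_all` and `evaluate` finish by calling `compute_ap(recall, precision)` on cumulative precision/recall arrays. That function is not included here; a minimal sketch of the standard all-point interpolated (VOC 2010-style) AP, assuming that is the variant in use:

import numpy as np

def compute_ap(recall, precision):
    # Hypothetical AP: area under the PR curve with a monotone precision envelope.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # Make precision non-increasing from right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum the areas where recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])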
Example #7
def main():
    parser = argparse.ArgumentParser(description='TensorFlow Pascal Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=60,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='tb/05',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    parser.add_argument('--checkpoint-dir',
                        type=str,
                        default='./checkpoints/06',
                        help='Path to checkpoints storage')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=2,
        help='how many batches to wait before storing checkpoints')
    parser.add_argument(
        '--pretrain-dir',
        type=str,
        default=
        './pre_trained_model/vgg16_weights_tf_dim_ordering_tf_kernels.h5',
        help='path to the pretrained model')
    parser.add_argument('--scratch-dir',
                        type=str,
                        default='./checkpoints/04/ckpt.h5',
                        help='path to the model trained from scratch')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()

    model = SimpleCNN(pretrain_dir=args.pretrain_dir,
                      scratch_dir=args.scratch_dir,
                      num_classes=len(CLASS_NAMES))

    train_images, train_labels, train_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='trainval')
    test_images, test_labels, test_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='test')
    # np.random.seed(1)
    # images_mix = train_images
    # np.random.shuffle(images_mix)

    # np.random.seed(1)
    # labels_mix = train_labels
    # np.random.shuffle(labels_mix)

    # np.random.seed(1)
    # weights_mix = train_weights
    # np.random.shuffle(weights_mix)

    # lamb = np.random.beta(2., 2.)

    # train_images=train_images * lamb + images_mix * (1-lamb)
    # train_labels=train_labels * lamb + labels_mix * (1-lamb)
    # train_weights=train_weights * lamb + weights_mix * (1-lamb)

    ## TODO modify the following code to apply data augmentation here
    print('start_loading!')
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)

    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))
    test_dataset = test_dataset.batch(50)

    train_dataset_mix = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    train_dataset_mix = train_dataset_mix.shuffle(10000).batch(args.batch_size)

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)
    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()

    ## TODO write the training and testing code for multi-label classification
    global_step = tf.train.get_or_create_global_step()
    learning_rate_decay = tf.train.exponential_decay(args.lr, global_step,
                                                     1000, 0.5)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate_decay,
                                           momentum=0.9)

    train_log = {'iter': [], 'loss': []}
    test_log = {'iter': [], 'loss': [], 'accuracy': []}
    print('start training!')

    for ep in range(args.epochs):
        epoch_loss_avg = tfe.metrics.Mean()
        # epoch_accuracy = tfe.metrics.Accuracy()
        for batch, ((images, labels, weights),
                    (images_mix, labels_mix, weights_mix)) in enumerate(
                        zip(train_dataset, train_dataset_mix)):
            # print(labels - labels_mix)

            labels = tf.cast(labels, tf.float32)

            labels_mix = tf.cast(labels_mix, tf.float32)

            weights = tf.cast(weights, tf.float32)

            weights_mix = tf.cast(weights_mix, tf.float32)

            lamb_size = images.shape[0]

            lamb = np.random.beta(0.2, 0.2, lamb_size)

            # print(lamb)

            images = images * lamb[:, np.newaxis, np.newaxis,
                                   np.newaxis] + images_mix * (
                                       1 - lamb)[:, np.newaxis, np.newaxis,
                                                 np.newaxis]
            # print(images.shape)
            weights = weights * lamb[:, np.newaxis] + weights_mix * (
                1. - lamb)[:, np.newaxis]

            labels = labels * lamb[:, np.newaxis] + labels_mix * (
                1. - lamb)[:, np.newaxis]

            # print(labels * lamb[:, np.newaxis])

            # print(labels.dtype)

            images, labels, weights = mean_normalization(
                images, labels, weights)
            images, labels, weights = randomly_crop(images, labels, weights)
            images, labels, weights = randomly_flip(images, labels, weights)

            # print(images[0])
            # print(labels)
            # print(weights.shape)

            with tf.contrib.summary.record_summaries_every_n_global_steps(100):
                tf.contrib.summary.image("sample_image", images, max_images=3)

            loss_value, grads = util.cal_grad(
                model,
                loss_func=tf.losses.sigmoid_cross_entropy,
                inputs=images,
                targets=labels,
                weights=weights)
            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step)
            learning_rate_decay = tf.train.exponential_decay(
                args.lr, global_step, 1000, 0.5)
            with tf.contrib.summary.record_summaries_every_n_global_steps(1):
                tf.contrib.summary.scalar('learning_rate',
                                          learning_rate_decay())

            with tf.contrib.summary.record_summaries_every_n_global_steps(10):
                for grad, var in zip(grads, model.trainable_variables):
                    tf.contrib.summary.histogram(
                        "{}/grad_histogram".format(var.name), grad)

            with tf.contrib.summary.record_summaries_every_n_global_steps(1):
                tf.contrib.summary.scalar('training_loss', loss_value)

            epoch_loss_avg(loss_value)
            if global_step.numpy() % args.log_interval == 0:
                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Training Loss:{3:.4f}'
                    .format(ep, args.epochs, global_step.numpy(),
                            epoch_loss_avg.result()))
                train_log['iter'].append(global_step.numpy())
                train_log['loss'].append(epoch_loss_avg.result())
                # tf.contrib.summary.scalar('training_loss', epoch_loss_avg.result())
                # train_log['accuracy'].append(epoch_accuracy.result())
            if global_step.numpy() % args.eval_interval == 0:
                test_loss, test_acc = test(model, test_dataset)
                with tf.contrib.summary.record_summaries_every_n_global_steps(
                        args.eval_interval):
                    tf.contrib.summary.scalar('testing_acc', test_acc)
                test_log['iter'].append(global_step.numpy())
                test_log['loss'].append(test_loss)
                test_log['accuracy'].append(test_acc)
                # tf.contrib.summary.scalar('testing_loss', test_loss)
                # tf.contrib.summary.scalar('testing_loss', test_acc)
                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Testing Loss:{3:.4f} Testing Accuracy:{4:.4f}'
                    .format(ep, args.epochs, global_step.numpy(), test_loss,
                            test_acc))
        # if global_step.numpy() % args.save_epoch == 0:
        #     checkpoint = tfe.Checkpoint(optimizer=optimizer,
        #                         model=model,
        #                         optimizer_step=tf.train.get_or_create_global_step())
        #     checkpoint_prefix = os.path.join(args.checkpoint_dir, "ckpt")
        #     checkpoint.save(file_prefix=checkpoint_prefix)

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)

    # checkpoint = tfe.Checkpoint(optimizer=optimizer,
    #                             model=model,
    #                             optimizer_step=tf.train.get_or_create_global_step())
    # checkpoint_prefix = os.path.join(args.checkpoint_dir, "ckpt")
    # checkpoint.save(file_prefix=checkpoint_prefix)
    checkpoint_prefix = os.path.join(args.checkpoint_dir, "ckpt.h5")
    model.save_weights(checkpoint_prefix)

    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
    writer.close()
Example #8
def main():
    parser = argparse.ArgumentParser(description='TensorFlow Pascal Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=30,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=250,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='pascal_caffenet_tb',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()

    splt = "trainval"
    trainval_npz = splt + '.npz'
    test_npz = 'test.npz'

    if (os.path.isfile(trainval_npz)):
        print("\nFound trainval npz file\n")
        with np.load(trainval_npz) as tr_npzfile:
            train_images = tr_npzfile['imgs']
            train_labels = tr_npzfile['labels']
            train_weights = tr_npzfile['weights']
    else:

        train_images, train_labels, train_weights = util.load_pascal(
            args.data_dir, class_names=CLASS_NAMES, split=splt)
        np.savez(trainval_npz,
                 imgs=train_images,
                 labels=train_labels,
                 weights=train_weights)

    ##TEST##
    if (os.path.isfile(test_npz)):
        print("\nFound test npz file\n")
        # npzfile = np.load(test_npz)
        with np.load(test_npz) as test_npzfile:
            test_images = test_npzfile['imgs']
            test_labels = test_npzfile['labels']
            test_weights = test_npzfile['weights']
    else:
        test_images, test_labels, test_weights = util.load_pascal(
            args.data_dir, class_names=CLASS_NAMES, split='test')
        np.savez(test_npz,
                 imgs=test_images,
                 labels=test_labels,
                 weights=test_weights)

    ## TODO modify the following code to apply data augmentation here
    rgb_mean = np.array([123.68, 116.78, 103.94], dtype=np.float32) / 256.0
    train_images = (train_images - rgb_mean).astype(np.float32)
    test_images = (test_images - rgb_mean).astype(np.float32)

    flip_fn = lambda img, lbl, wts: flip(img, lbl, wts)
    crop_fn = lambda img, lbl, wts: crop(img, lbl, wts)
    ccrop_fn = lambda img, lbl, wts: center_crop(img, lbl, wts)
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    flipped_train = train_dataset.map(flip_fn, num_parallel_calls=4)
    train_dataset = train_dataset.concatenate(flipped_train)
    train_dataset = train_dataset.map(crop_fn, num_parallel_calls=4)

    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)
    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))
    test_dataset = test_dataset.map(ccrop_fn, num_parallel_calls=4)
    test_dataset = test_dataset.batch(args.batch_size)

    model = SimpleCNN(num_classes=len(CLASS_NAMES))

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)

    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()
    tf.contrib.summary.initialize()

    global_step = tf.train.get_or_create_global_step()
    # optimizer = tf.train.AdamOptimizer(learning_rate=args.lr)

    ##decay lr using callback
    learning_rate = tf.Variable(args.lr)
    decay_interval = 5000
    # decay_op = tf.train.exponential_decay(args.lr,global_step,decay_interval,0.5)
    ##optimizer : sgd , momentum, 0.9
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=0.9)
    train_log = {'iter': [], 'loss': []}
    test_log = {'iter': [], 'mAP': []}
    checkpoint_directory = "./03_pascal_caffenet/"
    if not os.path.exists(checkpoint_directory):
        os.makedirs(checkpoint_directory)
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    # pdb.set_trace()
    latest = tf.train.latest_checkpoint(checkpoint_directory)
    load_flag = 0
    if (latest is not None):
        print("Loading checkpoint ", latest)
        status = checkpoint.restore(
            tf.train.latest_checkpoint(checkpoint_directory))
        load_flag = 1

    print("\nUsing eval interval: ", args.eval_interval)
    print("\nUsing batch size: ", args.batch_size)
    for ep in range(args.epochs):
        epoch_loss_avg = tfe.metrics.Mean()
        # for batch, (images, labels,weights) in enumerate(train_dataset):
        for (images, labels, weights) in tfe.Iterator(train_dataset):
            # pdb.set_trace()
            # loss_value, grads = util.cal_grad(model,
            #                                   loss_func=tf.losses.sigmoid_cross_entropy,
            #                                   inputs=images,
            #                                   targets=labels,
            #                                   weights=weights)

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                loss_value = tf.losses.sigmoid_cross_entropy(
                    labels, logits, weights)
            grads = tape.gradient(loss_value, model.trainable_variables)

            # print("Loss and gradient calculation, done \n")
            # pdb.set_trace()

            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step)
            epoch_loss_avg(loss_value)

            if global_step.numpy() % args.log_interval == 0:
                # pdb.set_trace()

                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Training Loss:{3:.4f}  '
                    .format(ep, args.epochs, global_step.numpy(),
                            epoch_loss_avg.result()))
                train_log['iter'].append(global_step.numpy())
                train_log['loss'].append(epoch_loss_avg.result())

                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('Training loss', loss_value)
                    tf.contrib.summary.scalar('Learning rate', learning_rate)
                    for i, variable in enumerate(model.trainable_variables):
                        tf.contrib.summary.histogram("grad_" + variable.name,
                                                     grads[i])

            if global_step.numpy() % args.eval_interval == 0:
                print("\n **** Running Eval *****\n")
                test_AP, test_mAP = util.eval_dataset_map(model, test_dataset)
                print("Eval finsished with test mAP : ", test_mAP)
                test_log['iter'].append(global_step.numpy())
                test_log['mAP'].append(test_mAP)
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('Testing mAP', test_mAP)

        learning_rate.assign(
            tf.train.exponential_decay(args.lr, global_step, decay_interval,
                                       0.5)())
        print("Learning rate:", learning_rate)
        checkpoint.save(checkpoint_prefix)

    ## TODO write the training and testing code for multi-label classification

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
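The `flip`, `crop`, and `center_crop` helpers wrapped by the lambdas in Example #8 are not shown. A minimal sketch, assuming simple per-example tf.image operations and a 224x224 crop (both assumptions):

import tensorflow as tf

def flip(img, lbl, wts):
    # Hypothetical deterministic horizontal flip (the flipped copies are concatenated above).
    return tf.image.flip_left_right(img), lbl, wts

def crop(img, lbl, wts):
    # Hypothetical random 224x224 crop for training.
    return tf.random_crop(img, [224, 224, 3]), lbl, wts

def center_crop(img, lbl, wts):
    # Hypothetical deterministic center crop for evaluation.
    return tf.image.resize_image_with_crop_or_pad(img, 224, 224), lbl, wts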
Example #9
def main():
    parser = argparse.ArgumentParser(description='TensorFlow Pascal Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=50,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='tb',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./data/VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()

    train_images, train_labels, train_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='trainval')
    test_images, test_labels, test_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='test')

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    train_dataset = train_dataset.map(augment_train_data)
    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)

    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))
    test_dataset = test_dataset.map(center_crop_test_data)
    test_dataset = test_dataset.batch(args.batch_size)

    model = SimpleCNN(num_classes=len(CLASS_NAMES))

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)
    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()

    tf.contrib.summary.initialize()

    global_step = tf.train.get_or_create_global_step()
    optimizer = tf.train.AdamOptimizer(learning_rate=args.lr)
    train_log = {'iter': [], 'loss': [], 'accuracy': []}
    test_log = {'iter': [], 'loss': [], 'accuracy': []}
    for ep in range(args.epochs):
        epoch_loss_avg = tfe.metrics.Mean()
        for batch, (images, labels, weights) in enumerate(train_dataset):
            loss_value, grads = util.cal_grad(
                model,
                loss_func=tf.losses.sigmoid_cross_entropy,
                inputs=images,
                targets=labels,
                weights=weights)
            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step)
            epoch_loss_avg(loss_value)

            with tf.contrib.summary.always_record_summaries():
                tf.contrib.summary.scalar('Training Loss', loss_value)
            if global_step.numpy() % args.log_interval == 0:
                print(
                    'Epoch: {0:d}/{1:d} Iteration:{2:d}  Training Loss:{3:.4f}'
                    .format(ep, args.epochs, global_step.numpy(),
                            epoch_loss_avg.result()))
                train_log['iter'].append(global_step.numpy())
                train_log['loss'].append(epoch_loss_avg.result())
            if global_step.numpy() % args.eval_interval == 0:
                test_AP, test_mAP = util.eval_dataset_map(model, test_dataset)
                print("mAP: ", test_mAP)
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('Test mAP', test_mAP)

    model.summary()

    # fig = plt.figure()
    # plt.plot(train_log['iter'], train_log['loss'], 'r', label='Training')
    # plt.plot(test_log['iter'], test_log['loss'], 'b', label='Testing')
    # plt.title('Loss')
    # plt.legend()
    # fig = plt.figure()
    # plt.plot(train_log['iter'], train_log['accuracy'], 'r', label='Training')
    # plt.plot(test_log['iter'], test_log['accuracy'], 'b', label='Testing')
    # plt.title('Accuracy')
    # plt.legend()
    # plt.show()

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
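`util.cal_grad` appears in most of these training loops. A minimal sketch of what it might do under eager execution, assuming it returns the scalar loss together with the gradients of that loss with respect to the model's trainable variables (this mirrors the explicit GradientTape code in Example #8):

import tensorflow as tf

def cal_grad(model, loss_func, inputs, targets, weights=1.0):
    # Hypothetical helper: forward pass, weighted loss, and gradients via tf.GradientTape.
    with tf.GradientTape() as tape:
        logits = model(inputs)
        loss_value = loss_func(targets, logits, weights)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)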
Example #10
def main():
    parser = argparse.ArgumentParser(description='TensorFlow Pascal Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=10,
                        help='input batch size for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        help='number of epochs to train')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        help='how many batches to wait before'
                        ' logging training status')
    parser.add_argument('--eval-interval',
                        type=int,
                        default=20,
                        help='how many batches to wait before'
                        ' evaluating the model')
    parser.add_argument('--log-dir',
                        type=str,
                        default='tb',
                        help='path for logging directory')
    parser.add_argument('--data-dir',
                        type=str,
                        default='./VOCdevkit/VOC2007',
                        help='Path to PASCAL data storage')
    args = parser.parse_args()
    util.set_random_seed(args.seed)
    sess = util.set_session()

    train_images, train_labels, train_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='trainval')
    test_images, test_labels, test_weights = util.load_pascal(
        args.data_dir, class_names=CLASS_NAMES, split='test')

    ## TODO modify the following code to apply data augmentation here
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_images, train_labels, train_weights))
    train_dataset = train_dataset.shuffle(10000).batch(args.batch_size)
    test_dataset = tf.data.Dataset.from_tensor_slices(
        (test_images, test_labels, test_weights))
    test_dataset = test_dataset.batch(args.batch_size)

    model = SimpleCNN(num_classes=len(CLASS_NAMES))

    logdir = os.path.join(args.log_dir,
                          datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    os.makedirs(logdir)
    writer = tf.contrib.summary.create_file_writer(logdir)
    writer.set_as_default()

    ## TODO write the training and testing code for multi-label classification

    AP, mAP = util.eval_dataset_map(model, test_dataset)
    rand_AP = util.compute_ap(test_labels,
                              np.random.random(test_labels.shape),
                              test_weights,
                              average=None)
    print('Random AP: {} mAP'.format(np.mean(rand_AP)))
    gt_AP = util.compute_ap(test_labels,
                            test_labels,
                            test_weights,
                            average=None)
    print('GT AP: {} mAP'.format(np.mean(gt_AP)))
    print('Obtained {} mAP'.format(mAP))
    print('Per class:')
    for cid, cname in enumerate(CLASS_NAMES):
        print('{}: {}'.format(cname, util.get_el(AP, cid)))
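Example #10 leaves the training loop as a TODO, so the final evaluation runs on an untrained model. A minimal drop-in sketch for that TODO, reusing the names already defined in this main() and following the pattern of the other examples (optimizer choice and logging details are assumptions):

    global_step = tf.train.get_or_create_global_step()
    optimizer = tf.train.AdamOptimizer(learning_rate=args.lr)
    for ep in range(args.epochs):
        for images, labels, weights in train_dataset:
            # Weighted multi-label loss and gradients (see the cal_grad sketch above).
            loss_value, grads = util.cal_grad(model,
                                              loss_func=tf.losses.sigmoid_cross_entropy,
                                              inputs=images,
                                              targets=labels,
                                              weights=weights)
            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step)
            if global_step.numpy() % args.log_interval == 0:
                print('Epoch: {}/{} Iteration: {} Training Loss: {:.4f}'.format(
                    ep, args.epochs, global_step.numpy(), loss_value.numpy()))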