# Example #1
def build_weibull(mean, distance, tail):
    """Fit one Weibull tail model per class.

    Args:
        mean: sequence of per-class mean activation vectors (MAVs).
        distance: sequence of per-class distance collections, parallel to
            ``mean``.
        tail: tail size forwarded to ``weibull_tailfitting``.

    Returns:
        Dict mapping each entry of the module-level ``label`` sequence to its
        fitted Weibull model.

    NOTE(review): depends on a module-level ``label`` sequence parallel to
    ``mean`` — confirm it is defined in the enclosing module.
    """
    weibull_model = {}
    # Iterate labels/means/distances in lockstep. The original pre-assigned
    # an empty dict per label that was immediately overwritten — removed.
    for lbl, m, d in zip(label, mean, distance):
        weibull_model[lbl] = weibull_tailfitting(m, d, tailsize=tail)
    return weibull_model
def main(train_args):
    """Load a pretrained FCN, fit an OpenMax Weibull model on the validation
    set, and evaluate on the test set.

    Args:
        train_args: dict of run settings (``h_size``, ``w_size``,
            ``num_workers``, ...); mutated in place to record ``best_record``.

    NOTE(review): relies on several module-level names (``conv_name``,
    ``args``, ``hidden``, ``pretrained_path``, ``dataset_name``, ``epoch``,
    ``num_known_classes``, ``num_unknown_classes``, ``ckpt_path``,
    ``outp_path``, ``exp_name``) — confirm they are defined in the module.
    """
    # Setting network architecture.
    if conv_name == 'fcnwideresnet50':
        net = FCNWideResNet50(4, num_classes=args['num_classes'], pretrained=False, skip=True, hidden_classes=hidden).cuda(args['device'])
    elif conv_name == 'fcndensenet121':
        net = FCNDenseNet121(4, num_classes=args['num_classes'], pretrained=False, skip=True, hidden_classes=hidden).cuda(args['device'])
    else:
        # BUG FIX: previously `net` was left unbound for an unrecognized
        # architecture, producing a confusing NameError further down.
        raise ValueError('Unknown conv_name: "%s"' % conv_name)

    print('Loading pretrained weights from file "' + pretrained_path + '"')
    net.load_state_dict(torch.load(pretrained_path))
    print(net)

    train_args['best_record'] = {'epoch': 0, 'lr': 1e-4, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'iou': 0}

    # Setting datasets.
    val_set = list_dataset.ListDataset(dataset_name, 'Validate', (train_args['h_size'], train_args['w_size']), 'statistical', hidden, overlap=False, use_dsm=True)
    val_loader = DataLoader(val_set, batch_size=1, num_workers=train_args['num_workers'], shuffle=False)

    test_set = list_dataset.ListDataset(dataset_name, 'Test', (train_args['h_size'], train_args['w_size']), 'statistical', hidden, overlap=True, use_dsm=True)
    test_loader = DataLoader(test_set, batch_size=1, num_workers=train_args['num_workers'], shuffle=False)

    # Setting criterion.
    criterion = CrossEntropyLoss2d(size_average=False, ignore_index=5).cuda(args['device'])

    # Making sure checkpoint and output directories are created.
    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    check_mkdir(outp_path)
    check_mkdir(os.path.join(outp_path, exp_name))

    # Validation pass collects the per-class mean activation vectors and
    # distances used below for Weibull tail fitting.
    mean_list, dist_list = validate(val_loader, net, criterion, epoch, num_known_classes, num_unknown_classes, hidden, train_args)

    print('Fitting model...')
    tic = time.time()
    weibull_model = weibull_tailfitting(mean_list, dist_list, num_known_classes, tailsize=1000)
    toc = time.time()
    print('    Model Fitted - Elapsed Time %.2f' % (toc - tic))

    # Release the (potentially large) fitting inputs before testing.
    mean_list.clear()
    dist_list.clear()
    del mean_list
    del dist_list
    gc.collect()

    # Computing test.
    test(test_loader, net, criterion, epoch, num_known_classes, num_unknown_classes, hidden, train_args, True, epoch % args['save_freq'] == 0, weibull_model)

    print('Exiting...')
# Example #3
def openmax(test_features, test_softmax, mean, distances, num_classes,
            distance_type, weibull_tailsize, alpha_rank):
    """Compute OpenMax scores for a batch of features.

    Fits per-class Weibull tail models from the supplied mean activation
    vectors and distances, then recalibrates the softmax scores with them.
    Returns the recalibrated (OpenMax) scores from ``recalibrate_scores``.
    """
    # Per-class Weibull models fitted on the tail of the distance
    # distributions.
    model = weibull_tailfitting(mean, distances, num_classes,
                                tailsize=weibull_tailsize)

    # 'alpharank' controls how many top classes get their scores reweighted.
    recalibrated = recalibrate_scores(model, test_features, test_softmax,
                                      num_classes, alpharank=alpha_rank,
                                      distance_type=distance_type)

    return recalibrated
# Example #4
def main():
    """Fit Weibull models from saved MAVs/distances and print OpenMax and
    softmax scores for a single image feature array.

    NOTE(review): relies on module-level names ``WEIBULL_TAIL_SIZE``,
    ``ALPHA_RANK``, ``getlabellist``, ``weibull_tailfitting``, ``loadmat``
    and ``recalibrate_scores`` — confirm they are defined in the module.
    """
    parser = argparse.ArgumentParser()

    # Optional arguments.
    parser.add_argument("--weibull_tailsize",
                        type=int,
                        default=WEIBULL_TAIL_SIZE,
                        help="Tail size used for weibull fitting")

    parser.add_argument(
        "--alpha_rank",
        type=int,
        default=ALPHA_RANK,
        help="Alpha rank to be used as a weight multiplier for top K scores")

    parser.add_argument(
        "--distance",
        default='eucos',
        help="Type of distance to be used for calculating distance \
        between mean vector and query image \
        (eucos, cosine, euclidean)")

    parser.add_argument(
        "--mean_files_path",
        default='data/mean_files/',
        help="Path to directory where mean activation vector (MAV) is saved.")

    parser.add_argument("--synsetfname",
                        default='synset_words_caffe_ILSVRC12.txt',
                        help="Path to Synset filename from caffe website")

    parser.add_argument(
        "--image_arrname",
        default='data/train_features/n01440764/n01440764_14280.JPEG.mat',
        help="Image Array name for which openmax scores are to be computed")

    parser.add_argument(
        "--distance_path",
        default='data/mean_distance_files/',
        help="Path to directory where distances of training data \
        from Mean Activation Vector is saved")

    args = parser.parse_args()

    distance_path = args.distance_path
    mean_path = args.mean_files_path
    alpha_rank = args.alpha_rank  # NOTE(review): parsed but not forwarded to recalibrate_scores — confirm intent.
    weibull_tailsize = args.weibull_tailsize
    synsetfname = args.synsetfname
    image_arrname = args.image_arrname

    labellist = getlabellist(synsetfname)
    # BUG FIX: honor the --weibull_tailsize CLI value; the original always
    # passed the WEIBULL_TAIL_SIZE constant, silently ignoring the flag.
    weibull_model = weibull_tailfitting(mean_path,
                                        distance_path,
                                        labellist,
                                        tailsize=weibull_tailsize)

    # Converted Python-2 print statements to print() calls, consistent with
    # the rest of the file.
    print("Completed Weibull fitting on %s models" % len(weibull_model.keys()))
    imgarr = loadmat(image_arrname)
    openmax, softmax = recalibrate_scores(weibull_model, labellist, imgarr)
    print("Image ArrName: %s" % image_arrname)
    print("Softmax Scores ", softmax)
    print("Openmax Scores ", openmax)
    print(openmax.shape, softmax.shape)
# Example #5
def main():
    """Fit Weibull models from saved MAVs/distances and print OpenMax and
    softmax scores for a single image feature array.

    NOTE(review): relies on module-level names ``WEIBULL_TAIL_SIZE``,
    ``ALPHA_RANK``, ``getlabellist``, ``weibull_tailfitting``, ``loadmat``
    and ``recalibrate_scores`` — confirm they are defined in the module.
    """
    parser = argparse.ArgumentParser()

    # Optional arguments.
    parser.add_argument(
        "--weibull_tailsize",
        type=int,
        default=WEIBULL_TAIL_SIZE,
        help="Tail size used for weibull fitting"
    )

    parser.add_argument(
        "--alpha_rank",
        type=int,
        default=ALPHA_RANK,
        help="Alpha rank to be used as a weight multiplier for top K scores"
    )

    parser.add_argument(
        "--distance",
        default='eucos',
        help="Type of distance to be used for calculating distance \
        between mean vector and query image \
        (eucos, cosine, euclidean)"
    )

    parser.add_argument(
        "--mean_files_path",
        default='data/mean_files/',
        help="Path to directory where mean activation vector (MAV) is saved."
    )

    parser.add_argument(
        "--synsetfname",
        default='synset_words_caffe_ILSVRC12.txt',
        help="Path to Synset filename from caffe website"
    )

    parser.add_argument(
        "--image_arrname",
        default='data/train_features/n01440764/n01440764_14280.JPEG.mat',
        help="Image Array name for which openmax scores are to be computed"
    )

    parser.add_argument(
        "--distance_path",
        default='data/mean_distance_files/',
        help="Path to directory where distances of training data \
        from Mean Activation Vector is saved"
    )

    args = parser.parse_args()

    distance_path = args.distance_path
    mean_path = args.mean_files_path
    alpha_rank = args.alpha_rank  # NOTE(review): parsed but not forwarded to recalibrate_scores — confirm intent.
    weibull_tailsize = args.weibull_tailsize
    synsetfname = args.synsetfname
    image_arrname = args.image_arrname

    labellist = getlabellist(synsetfname)
    # BUG FIX: honor the --weibull_tailsize CLI value; the original always
    # passed the WEIBULL_TAIL_SIZE constant, silently ignoring the flag.
    weibull_model = weibull_tailfitting(mean_path, distance_path, labellist,
                                        tailsize=weibull_tailsize)

    # Converted Python-2 print statements to print() calls, consistent with
    # the rest of the file.
    print("Completed Weibull fitting on %s models" % len(weibull_model.keys()))
    imgarr = loadmat(image_arrname)
    openmax, softmax = recalibrate_scores(weibull_model, labellist, imgarr)
    print("Image ArrName: %s" % image_arrname)
    print("Softmax Scores ", softmax)
    print("Openmax Scores ", openmax)
    print(openmax.shape, softmax.shape)
# Example #6
def main():
    """Fit Weibull models on training MAVs/distances, score every test image
    with OpenMax, and plot an ROC curve separating known (KKC) from unknown
    (UUC) classes.

    NOTE(review): relies on module-level names ``WEIBULL_TAIL_SIZE``,
    ``ALPHA_RANK``, ``getlabellist``, ``weibull_tailfitting``, ``loadmat``,
    ``recalibrate_scores``, ``roc_curve``, ``auc`` and ``plt`` — confirm they
    are defined/imported in the module.
    """
    parser = argparse.ArgumentParser()

    # Optional arguments.
    parser.add_argument("--weibull_tailsize",
                        type=int,
                        default=WEIBULL_TAIL_SIZE,
                        help="Tail size used for weibull fitting")

    parser.add_argument(
        "--alpha_rank",
        type=int,
        default=ALPHA_RANK,
        help="Alpha rank to be used as a weight multiplier for top K scores")

    parser.add_argument(
        "--distance",
        default='eucos',
        help="Type of distance to be used for calculating distance \
        between mean vector and query image \
        (eucos, cosine, euclidean)")

    parser.add_argument("--synsetfname",
                        default='synset_words_caffe_ILSVRC12.txt',
                        help="Path to Synset filename from caffe website")

    parser.add_argument(
        "--image_folder",
        default='data/',
        help=
        "Image folder directory for which openmax scores are to be computed")

    parser.add_argument('-cls',
                        '--classes',
                        type=int,
                        nargs='+',
                        default=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))

    args = parser.parse_args()

    alpha_rank = args.alpha_rank
    weibull_tailsize = args.weibull_tailsize
    synsetfname = args.synsetfname

    labellist = getlabellist(synsetfname)
    image_folder = args.image_folder
    mean_path = os.path.join(image_folder, 'train', 'means')
    distance_path = os.path.join(image_folder, 'train', 'distances')
    weibull_model = weibull_tailfitting(mean_path,
                                        distance_path,
                                        labellist,
                                        tailsize=weibull_tailsize)
    print("Completed Weibull fitting on %s models" % len(weibull_model.keys()))

    # All class directories present under test/ (known + unknown).
    categories = list(
        filter(lambda x: os.path.isdir(os.path.join(image_folder, 'test', x)),
               os.listdir(os.path.join(image_folder, 'test'))))
    categories.sort()
    # BUG FIX: known classes (KKC) are the subdirectories of train/; the
    # original listed train/ but tested each entry's existence under test/,
    # an inconsistent path that could misclassify classes.
    kkc = list(
        filter(lambda x: os.path.isdir(os.path.join(image_folder, 'train', x)),
               os.listdir(os.path.join(image_folder, 'train'))))
    kkc.sort()
    uuc = list(set(categories) - set(kkc))
    kkc_scores = []
    uuc_scores = []
    for category in categories:
        # Route this category's scores into the known or unknown bucket.
        if category in kkc:
            scores = kkc_scores
        else:
            scores = uuc_scores
        for image_arrname in glob.glob(
                os.path.join(image_folder, 'test', category, '*.mat')):
            imgarr = loadmat(image_arrname)
            openmax, softmax = recalibrate_scores(weibull_model,
                                                  labellist,
                                                  imgarr,
                                                  alpharank=alpha_rank)
            # Max OpenMax activation is the per-image "knownness" score.
            scores.append(np.max(openmax))
    kkc_scores = np.array(kkc_scores)
    uuc_scores = np.array(uuc_scores)
    print(kkc_scores.mean(), uuc_scores.mean())

    # Known images are the positive class for the ROC.
    y_true = np.concatenate(
        (np.ones_like(kkc_scores), np.zeros_like(uuc_scores)))
    y_score = np.concatenate((kkc_scores, uuc_scores))
    fpr, tpr, thresholds = roc_curve(y_true, y_score)

    auc_score = auc(fpr, tpr)
    plt.plot(fpr, tpr)
    print(auc_score)
    plt.title('Openmax_ROC_Curve of {}, AUC: {:.4f}'.format(
        image_folder.split('/')[-1], auc_score))
    plt.xlabel('fpr')
    plt.ylabel('tpr')

    plt.savefig(os.path.join(image_folder, 'openmax_roc.png'))
    plt.show()