Example #1
def single_run_test(hyperparams):
    img, gt = load_and_update(hyperparams)
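    # drop the 4-character extension (e.g. ".pth") from the checkpoint name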
    ofname = get_checkpoint_filename(hyperparams)[:-4]
    ofname += "_" + hyperparams['dataset']
    prediction, hyperparams = model_prediction(img, ofname, hyperparams)

    if hyperparams['sampling_mode'] == 'fixed':
        gt = get_fixed_sets(hyperparams['run'],
                            hyperparams['sample_path'],
                            hyperparams['dataset'],
                            mode='test')
    run_results = metrics(prediction, gt, hyperparams['ignored_labels'],
                          hyperparams['n_classes'])
    path = '{rdir}/prediction_training_{train_dataset}_test_{dataset}_epoch_{epoch}_batch_{batch_size}'.format(
        **hyperparams)
    os.makedirs(path, exist_ok=True)
    show_results(run_results,
                 None,
                 hyperparams['model'],
                 hyperparams['dataset'],
                 path,
                 hyperparams['preprocessing']["type"],
                 label_values=hyperparams['label_values'],
                 training_image=hyperparams['train_dataset'],
                 agregated=False)
    plot_names = {
        'path': path,
        'checkpoint': get_checkpoint_filename(hyperparams),
        'dataset': hyperparams['dataset'],
        'ignored': hyperparams['ignored_labels']
    }
    create_plots(hyperparams['multi_class'], prediction, gt, plot_names)
Example #2
def main(args):
    img = Image.open(args.img_path)
    if args.has_reference != 'False':
        ref_img = Image.open(args.has_reference)

    # select the enhancement method; an unknown name would otherwise leave
    # `model` undefined and crash later with a NameError
    if args.method == 'HE':
        model = HE()
    elif args.method == 'Gamma':
        model = Gamma(args.gamma)
    elif args.method == 'Gray_World':
        model = Gray_World(args.gamma)
    elif args.method == 'Retinex':
        model = Retinex(args)
    elif args.method == 'Max_RGB':
        model = Max_RGB(args.gamma)
    elif args.method == 'DeHaze':
        model = DeHaze(args.omega, args.kernel_size, args.model)
    elif args.method == 'LIME':
        model = LIME(args.gamma, args.alpha, args.sigma, args.kernel_size)
    else:
        raise ValueError('Unknown method: %s' % args.method)

    pro_img = model.run(img)  # run the selected enhancement on the image

    # print out the results (with the reference image if one was given)
    if args.has_reference == 'False':
        show_results(img, pro_img)
        print_results(args, img, pro_img)
    else:
        show_results(img, pro_img, ref_img)
        print_results(args, img, pro_img, ref_img)

    return
Example #3
def predict(valid_loader, encoder, decoder, device, show=True):
    # switch to evaluation mode
    encoder.eval()
    decoder.eval()

    vocab = valid_loader.dataset.vocab
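    # decode a single batch from the validation loader for a qualitative check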
    batch = next(iter(valid_loader))
    batch = {k: v.to(device) for k, v in batch.items()}
    with torch.no_grad():
        features = encoder(batch["images"])
        predictions = decoder.predict(features, config.MAX_LEN_PRED, vocab)
        predicted_sequence = torch.argmax(predictions.cpu(), -1)

        captions = vocab.decode_batch(predicted_sequence)
        if show:
            show_results(captions, batch, vocab)

    return captions
Example #4
        probabilities = test(model, img, hyperparams)
        prediction = np.argmax(probabilities, axis=-1)

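    # crop away the half-patch padding border on every side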
    prediction = prediction[(PATCH_SIZE // 2):-(PATCH_SIZE // 2),
                            (PATCH_SIZE // 2):-(PATCH_SIZE // 2)]
    test_gt = test_gt[(PATCH_SIZE // 2):-(PATCH_SIZE // 2),
                      (PATCH_SIZE // 2):-(PATCH_SIZE // 2)]
    gt = gt[(PATCH_SIZE // 2):-(PATCH_SIZE // 2),
            (PATCH_SIZE // 2):-(PATCH_SIZE // 2)]

    run_results = metrics(prediction,
                          test_gt,
                          ignored_labels=hyperparams['ignored_labels'],
                          n_classes=N_CLASSES)

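    # zero out ignored labels before colorizing and displaying the prediction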
    mask = np.zeros(gt.shape, dtype='bool')
    for l in IGNORED_LABELS:
        mask[gt == l] = True
    prediction[mask] = 0
    color_prediction = convert_to_color(prediction)
    display_predictions(color_prediction,
                        viz,
                        gt=convert_to_color(gt),
                        caption="Prediction vs. ground truth")

    results.append(run_results)
    show_results(run_results, viz, label_values=LABEL_VALUES)

if N_RUNS > 1:
    show_results(results, viz, label_values=LABEL_VALUES, agregated=True)
Example #5
def main(raw_args=None):
    parser = argparse.ArgumentParser(
        description="Hyperspectral image classification with FixMatch")
    parser.add_argument(
        '--patch_size',
        type=int,
        default=5,
        help='Size of patch around each pixel taken for classification')
    parser.add_argument(
        '--center_pixel',
        action='store_false',
        help=
        'use if you only want to consider the label of the center pixel of a patch'
    )
    parser.add_argument('--batch_size',
                        type=int,
                        default=10,
                        help='Size of each batch for training')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        help='number of total epochs of training to run')
    parser.add_argument('--dataset',
                        type=str,
                        default='Salinas',
                        help='Name of dataset to run, Salinas or PaviaU')
    parser.add_argument('--cuda',
                        type=int,
                        default=-1,
                        help='what CUDA device to run on, -1 defaults to cpu')
    parser.add_argument('--warmup',
                        type=float,
                        default=0,
                        help='warmup epochs')
    parser.add_argument('--save',
                        action='store_true',
                        help='use to save model weights when running')
    parser.add_argument(
        '--test_stride',
        type=int,
        default=1,
        help='length of stride when sliding patch window over image for testing'
    )
    parser.add_argument(
        '--sampling_percentage',
        type=float,
        default=0.3,
        help=
        'percentage of dataset to sample for training (labeled and unlabeled included)'
    )
    parser.add_argument(
        '--sampling_mode',
        type=str,
        default='nalepa',
        help='how to sample data, disjoint, random, nalepa, or fixed')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='initial learning rate')
    parser.add_argument('--alpha',
                        type=float,
                        default=1.0,
                        help='beta distribution range')
    parser.add_argument(
        '--class_balancing',
        action='store_false',
        help='use to balance weights according to ratio in dataset')
    parser.add_argument(
        '--checkpoint',
        type=str,
        default=None,
        help='use to load model weights from a certain directory')
    # Augmentation arguments
    parser.add_argument('--flip_augmentation',
                        action='store_true',
                        help='use to apply flip augmentation to the data')
    parser.add_argument('--radiation_augmentation',
                        action='store_true',
                        help='use to apply radiation noise augmentation')
    parser.add_argument('--mixture_augmentation',
                        action='store_true',
                        help='use to apply mixture noise augmentation')
    parser.add_argument('--pca_augmentation',
                        action='store_true',
                        help='use to apply PCA augmentation to the data')
    parser.add_argument(
        '--pca_strength',
        type=float,
        default=1.0,
        help='Strength of the PCA augmentation, defaults to 1.')
    parser.add_argument('--cutout_spatial',
                        action='store_true',
                        help='use spatial cutout for data augmentation')
    parser.add_argument('--cutout_spectral',
                        action='store_true',
                        help='use spectral cutout for data augmentation')
    parser.add_argument(
        '--augmentation_magnitude',
        type=int,
        default=1,
        help=
        'Magnitude of augmentation (so far only for cutout). Defaults to 1; min 1, max 10.'
    )
    parser.add_argument('--spatial_combinations',
                        action='store_true',
                        help='use spatial combinations for data augmentation')
    parser.add_argument('--spectral_mean',
                        action='store_true',
                        help='use spectral mean for data augmentation')
    parser.add_argument(
        '--moving_average',
        action='store_true',
        help='use spectral moving average for data augmentation')

    parser.add_argument('--results',
                        type=str,
                        default='results',
                        help='where to save results to (default results)')
    parser.add_argument('--save_dir',
                        type=str,
                        default='/saves/',
                        help='where to save models to (default /saves/)')
    parser.add_argument('--data_dir',
                        type=str,
                        default='/data/',
                        help='where to fetch data from (default /data/)')
    parser.add_argument('--load_file',
                        type=str,
                        default=None,
                        help='which file to load weights from (default None)')
    parser.add_argument(
        '--fold',
        type=int,
        default=0,
        help='Which fold to sample from if using Nalepas validation scheme')
    parser.add_argument(
        '--sampling_fixed',
        type=str,
        default='True',
        help=
        'use to sample a fixed number of samples for each class with Nalepa sampling'
    )
    parser.add_argument(
        '--samples_per_class',
        type=int,
        default=10,
        help=
        'Number of samples to draw for each class when sampling a fixed amount. Defaults to 10.'
    )

    parser.add_argument(
        '--supervision',
        type=str,
        default='full',
        help='whether to make use of all labeled data or not; full or semi')

    args = parser.parse_args(raw_args)

    device = utils.get_device(args.cuda)
    args.device = device

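    # Visdom display is disabled; plots and text go to TensorBoard instead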
    #vis = visdom.Visdom()
    vis = None

    tensorboard_dir = str(args.results + '/' +
                          datetime.datetime.now().strftime("%m-%d-%X"))

    os.makedirs(tensorboard_dir, exist_ok=True)
    writer = SummaryWriter(tensorboard_dir)

    if args.sampling_mode == 'nalepa':
        train_img, train_gt, test_img, test_gt, label_values, ignored_labels, rgb_bands, palette = get_patch_data(
            args.dataset,
            args.patch_size,
            target_folder=args.data_dir,
            fold=args.fold)
        args.n_bands = train_img.shape[-1]
    else:
        img, gt, label_values, ignored_labels, rgb_bands, palette = get_dataset(
            args.dataset, target_folder=args.data_dir)
        args.n_bands = img.shape[-1]

    args.n_classes = len(label_values) - len(ignored_labels)
    args.ignored_labels = ignored_labels

    if palette is None:
        # Generate color palette
        palette = {0: (0, 0, 0)}
        for k, color in enumerate(
                sns.color_palette("hls",
                                  len(label_values) - 1)):
            palette[k + 1] = tuple(
                np.asarray(255 * np.array(color), dtype='uint8'))
    invert_palette = {v: k for k, v in palette.items()}

    def convert_to_color(x):
        return utils.convert_to_color_(x, palette=palette)

    def convert_from_color(x):
        return utils.convert_from_color_(x, palette=invert_palette)

    if args.sampling_mode == 'nalepa':
        print("{} samples selected (over {})".format(
            np.count_nonzero(train_gt),
            np.count_nonzero(train_gt) + np.count_nonzero(test_gt)))
        writer.add_text(
            'Amount of training samples',
            "{} samples selected (over {})".format(
                np.count_nonzero(train_gt),
                np.count_nonzero(train_gt) + np.count_nonzero(test_gt)))

        utils.display_predictions(convert_to_color(test_gt),
                                  vis,
                                  writer=writer,
                                  caption="Test ground truth")
    else:
        train_gt, test_gt = utils.sample_gt(gt,
                                            args.sampling_percentage,
                                            mode=args.sampling_mode)
        print("{} samples selected (over {})".format(
            np.count_nonzero(train_gt), np.count_nonzero(gt)))
        writer.add_text(
            'Amount of training samples',
            "{} samples selected (over {})".format(np.count_nonzero(train_gt),
                                                   np.count_nonzero(gt)))

        utils.display_predictions(convert_to_color(train_gt),
                                  vis,
                                  writer=writer,
                                  caption="Train ground truth")
        utils.display_predictions(convert_to_color(test_gt),
                                  vis,
                                  writer=writer,
                                  caption="Test ground truth")

    model = HamidaEtAl(args.n_bands,
                       args.n_classes,
                       patch_size=args.patch_size)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          nesterov=True,
                          weight_decay=0.0005)
    #loss_labeled = nn.CrossEntropyLoss(weight=weights)
    #loss_unlabeled = nn.CrossEntropyLoss(weight=weights, reduction='none')

    if args.sampling_mode == 'nalepa':
        #Get fixed amount of random samples for validation
        idx_sup, idx_val, idx_unsup = get_pixel_idx(train_img, train_gt,
                                                    args.ignored_labels,
                                                    args.patch_size)

        if args.sampling_fixed == 'True':
            unique_labels = np.zeros(len(label_values))
            new_idx_sup = []
            selected = []
            for index, (p, x, y) in enumerate(idx_sup):
                label = train_gt[p, x, y]
                if unique_labels[label] < args.samples_per_class:
                    unique_labels[label] += 1
                    new_idx_sup.append([p, x, y])
                    selected.append(index)
            # np.delete returns a copy, so drop the selected rows explicitly;
            # otherwise the labeled samples would reappear in the unlabeled set
            idx_unsup = np.concatenate(
                (np.delete(idx_sup, selected, axis=0), idx_unsup))
            idx_sup = np.asarray(new_idx_sup)

        writer.add_text(
            'Amount of labeled training samples',
            "{} samples selected (over {})".format(idx_sup.shape[0],
                                                   np.count_nonzero(train_gt)))
        train_labeled_gt = [
            train_gt[p_l, x_l, y_l] for p_l, x_l, y_l in idx_sup
        ]

        samples_class = np.zeros(args.n_classes)
        for c in np.unique(train_labeled_gt):
            samples_class[c - 1] = np.count_nonzero(train_labeled_gt == c)
        writer.add_text('Labeled samples per class', str(samples_class))
        print('Labeled samples per class: ' + str(samples_class))

        val_dataset = HyperX_patches(train_img,
                                     train_gt,
                                     idx_val,
                                     labeled='Val',
                                     **vars(args))
        val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size)

        train_dataset = HyperX_patches(train_img,
                                       train_gt,
                                       idx_sup,
                                       labeled=True,
                                       **vars(args))
        train_loader = data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            #pin_memory=True, num_workers=5,
            shuffle=True,
            drop_last=True)

        amount_labeled = idx_sup.shape[0]
    else:
        train_labeled_gt, val_gt = utils.sample_gt(train_gt,
                                                   0.95,
                                                   mode=args.sampling_mode)

        val_dataset = HyperX(img, val_gt, labeled='Val', **vars(args))
        val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size)

        writer.add_text(
            'Amount of labeled training samples',
            "{} samples selected (over {})".format(
                np.count_nonzero(train_labeled_gt),
                np.count_nonzero(train_gt)))
        samples_class = np.zeros(args.n_classes)
        for c in np.unique(train_labeled_gt):
            samples_class[c - 1] = np.count_nonzero(train_labeled_gt == c)
        writer.add_text('Labeled samples per class', str(samples_class))

        train_dataset = HyperX(img,
                               train_labeled_gt,
                               labeled=True,
                               **vars(args))
        train_loader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       pin_memory=True,
                                       num_workers=5,
                                       shuffle=True,
                                       drop_last=True)

        utils.display_predictions(convert_to_color(train_labeled_gt),
                                  vis,
                                  writer=writer,
                                  caption="Labeled train ground truth")
        utils.display_predictions(convert_to_color(val_gt),
                                  vis,
                                  writer=writer,
                                  caption="Validation ground truth")

        amount_labeled = np.count_nonzero(train_labeled_gt)

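    # one epoch = one full pass over the labeled samples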
    args.iterations = amount_labeled // args.batch_size
    args.total_steps = args.iterations * args.epochs
    args.scheduler = get_cosine_schedule_with_warmup(
        optimizer, args.warmup * args.iterations, args.total_steps)

    if args.class_balancing:
        weights_balance = utils.compute_imf_weights(train_gt,
                                                    len(label_values),
                                                    args.ignored_labels)
        args.weights = torch.from_numpy(weights_balance[1:])
        args.weights = args.weights.to(torch.float32)
    else:
        weights = torch.ones(args.n_classes)
        #weights[torch.LongTensor(args.ignored_labels)] = 0
        args.weights = weights

    args.weights = args.weights.to(args.device)
    criterion = nn.CrossEntropyLoss(weight=args.weights)
    loss_val = nn.CrossEntropyLoss(weight=args.weights)

    print(args)
    print("Network :")
    writer.add_text('Arguments', str(args))
    with torch.no_grad():
        for input, _ in train_loader:
            break
        #summary(model.to(device), input.size()[1:])
        #writer.add_graph(model.to(device), input)
        # We would like to use device=hyperparams['device'] although we have
        # to wait for torchsummary to be fixed first.

    if args.load_file is not None:
        model.load_state_dict(torch.load(args.load_file))
    model.zero_grad()

    try:
        train(model,
              optimizer,
              criterion,
              loss_val,
              train_loader,
              writer,
              args,
              val_loader=val_loader,
              display=vis)
    except KeyboardInterrupt:
        # Allow the user to stop the training
        pass

    if args.sampling_mode == 'nalepa':
        probabilities = test(model, test_img, args)
    else:
        probabilities = test(model, img, args)
    prediction = np.argmax(probabilities, axis=-1)

    run_results = utils.metrics(prediction,
                                test_gt,
                                ignored_labels=args.ignored_labels,
                                n_classes=args.n_classes)

    mask = np.zeros(test_gt.shape, dtype='bool')
    for l in args.ignored_labels:
        mask[test_gt == l] = True
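    # shift predictions back to ground-truth label ids (class 0 is ignored),
    # then blank out the ignored pixels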
    prediction += 1
    prediction[mask] = 0

    color_prediction = convert_to_color(prediction)
    utils.display_predictions(color_prediction,
                              vis,
                              gt=convert_to_color(test_gt),
                              writer=writer,
                              caption="Prediction vs. test ground truth")

    utils.show_results(run_results,
                       vis,
                       writer=writer,
                       label_values=label_values)

    writer.close()

    return run_results
Example #6
def test_all_dataset(xargs, test_loaders, URT_model, logger, writter, mode,
                     training_iter, cosine_temp):
    URT_model.eval()
    our_name = 'urt'
    accs_names = [our_name]
    alg2data2accuracy = collections.OrderedDict()
    alg2data2accuracy['sur-paper'], alg2data2accuracy[
        'sur-exp'] = pre_load_results()
    alg2data2accuracy[our_name] = {name: [] for name in test_loaders.keys()}

    logger.print(
        '\n{:} starting to evaluate the {:} set at the {:}-th iteration.'.format(
            time_string(), mode, training_iter))
    for idata, (test_dataset, loader) in enumerate(test_loaders.items()):
        logger.print('===>>> {:} --->>> {:02d}/{:02d} --->>> {:}'.format(
            time_string(), idata, len(test_loaders), test_dataset))
        our_losses = AverageMeter()
        for idx, (_, context_features, context_labels, target_features,
                  target_labels) in enumerate(loader):
            context_features, context_labels = context_features.squeeze(
                0).cuda(), context_labels.squeeze(0).cuda()
            target_features, target_labels = target_features.squeeze(
                0).cuda(), target_labels.squeeze(0).cuda()
            n_classes = len(np.unique(context_labels.cpu().numpy()))
            # optimize selection parameters and perform feature selection
            avg_urt_params = get_lambda_urt_avg(context_features,
                                                context_labels,
                                                n_classes,
                                                URT_model,
                                                normalize=True)

            urt_context_features = apply_urt_avg_selection(context_features,
                                                           avg_urt_params,
                                                           normalize=True)
            urt_target_features = apply_urt_avg_selection(target_features,
                                                          avg_urt_params,
                                                          normalize=True)
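            # average each class's support (context) features into a prototype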
            proto_list = []
            for label in range(n_classes):
                proto = urt_context_features[context_labels == label].mean(
                    dim=0)
                proto_list.append(proto)
            urt_proto = torch.stack(proto_list)

            #if random.random() > 0.99:
            #  print("urt avg score {}".format(avg_urt_params))
            #  print("-"*20)
            with torch.no_grad():
                logits = get_cosine_logits(urt_target_features, urt_proto,
                                           cosine_temp)
                loss = F.cross_entropy(logits, target_labels)
                our_losses.update(loss.item())
                predicts = torch.argmax(logits, dim=-1)
                final_acc = torch.eq(target_labels,
                                     predicts).float().mean().item()
                alg2data2accuracy[our_name][test_dataset].append(final_acc)
        base_name = '{:}-{:}'.format(test_dataset, mode)
        writter.add_scalar("{:}-our-loss".format(base_name), our_losses.avg,
                           training_iter)
        writter.add_scalar("{:}-our-acc".format(base_name),
                           np.mean(alg2data2accuracy[our_name][test_dataset]),
                           training_iter)

    dataset_names = list(test_loaders.keys())
    show_results(dataset_names, alg2data2accuracy, ('sur-paper', our_name),
                 logger.print)
    logger.print("\n")
Example #7
def results():
    videoData = show_results()
    return render_template('results.html', videoData=videoData)
Example #8
	# Add time and uid to the likelihood array
	time = (X_test[:,:,1])[:, np.newaxis]
	time = np.swapaxes(time, 1, -1)

	uid = (X_test[:,:,0])[:, np.newaxis]
	uid = np.swapaxes(uid, 1, -1)

	y_est = np.concatenate((uid, time, likelihood),-1)

	max_time = int(max(y_est[-1,:,1]) * 1000) # in ms

	time_series = np.zeros((max_time+1, 4))
	time_series[:,0] = np.arange(0,max_time+1)

	y_true = np.zeros((max_time+1, 4))
	y_true[:,0] = np.arange(0,max_time+1)

	for i in range(len(time)):
		idx = (np.squeeze(time[i]) * 1000).astype(int)
		# accumulate per-timestep magnitudes as a root sum of squares
		time_series[idx, 1:] = np.sqrt(time_series[idx, 1:]**2 + likelihood[i]**2)
		y_true[idx, 1:] = np.sqrt(y_true[idx, 1:]**2 + y_test[i]**2)

	sdist = eval_prediction(likelihood, y_test[:], 'test', plot=False)

	show_results(sdist=sdist, name=model_name)
Example #9

# load model and weights
model = get_model(MODEL, args.dataset, N_CLASSES, N_BANDS, PATCH_SIZE)
weights_path = os.path.join(WEIGHTS, 'model_best.pth')
print('Loading weights from %s' % weights_path)
model = model.to(device)
model.load_state_dict(torch.load(weights_path))
model.eval()

#testing model
probabilities = test(model, WEIGHTS, img, PATCH_SIZE, N_CLASSES, device=device)
prediction = np.argmax(probabilities, axis=-1)

run_results = metrics(prediction, gt, n_classes=N_CLASSES)

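# mark pixels without ground truth (gt < 0) as undefined in the prediction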
prediction[gt < 0] = -1

#color results
colored_gt = color_results(gt + 1, palette)
colored_pred = color_results(prediction + 1, palette)

outfile = os.path.join(OUTPUT, DATASET, MODEL)
os.makedirs(outfile, exist_ok=True)

imageio.imsave(os.path.join(outfile, DATASET + '_gt.png'), colored_gt)
imageio.imsave(os.path.join(outfile, DATASET + '_' + MODEL + '_out.png'),
               colored_pred)

show_results(run_results, label_values=LABEL_VALUES)
del model
Example #10
  #    build the vocabulary
  vocab_filename = 'vocab_50.pkl'
  if not osp.isfile(vocab_filename):
    # Construct the vocabulary
    print('No existing visual word vocabulary found. Computing one from training images')
    vocab_size = 50  # Larger values will work better (to a point) but be slower to compute
    vocab = build_vocabulary(train_image_paths, vocab_size)
    with open(vocab_filename, 'wb') as f:
      pickle.dump(vocab, f)
      print('{:s} saved'.format(vocab_filename))

  #    get bags of sifts features
  train_image_feats = get_bags_of_sifts(train_image_paths, vocab_filename)
  test_image_feats = get_bags_of_sifts(test_image_paths, vocab_filename)

  #    classify the test images with the SVM trained on bag-of-SIFT features
  predicted_categories = svm_classify(train_image_feats, train_labels, test_image_feats)

  # accuracy: fraction of test images assigned the correct category
  count = sum(1 for true, pred in zip(test_labels, predicted_categories)
              if true == pred)

  print("accuracy: ", count / len(test_labels))

  # save the confusion map of the svm test results
  show_results(train_image_paths, test_image_paths, train_labels, test_labels,
               categories, abbr_categories, predicted_categories)