Example #1
        if full_np_scores is None:
            full_np_scores = val_np_scores
            full_np_rectified_scores = val_rectified_np_scores
        else:
            full_np_scores = np.vstack((full_np_scores, val_np_scores))
            full_np_rectified_scores = np.vstack(
                (full_np_rectified_scores, val_rectified_np_scores))

        examples_counter += 1
        #update accuracy measures for each batch of images
        if examples_counter == batch_size:
            full_labels = th.from_numpy(np.array(full_labels, dtype=int))
            full_np_scores = th.from_numpy(full_np_scores)
            full_np_rectified_scores = th.from_numpy(full_np_rectified_scores)
            # compute top-1 and top-5 accuracy
            prec1, prec5 = utils.accuracy(full_np_scores,
                                          full_labels,
                                          topk=(1, min(5, b * P)))
            prec1_rectified, prec5_rectified = utils.accuracy(
                full_np_rectified_scores, full_labels, topk=(1, min(5, b * P)))
            top1_ft.update(prec1.item(), examples_counter)
            top5_ft.update(prec5.item(), examples_counter)
            top1_mc.update(prec1_rectified.item(), examples_counter)
            top5_mc.update(prec5_rectified.item(), examples_counter)
            # reinitialize the scores arrays
            full_np_scores = None
            full_np_rectified_scores = None
            full_labels = []
            examples_counter = 0

    #if there are some data left at the end, run a last update of the accuracy measures
    if full_labels != []:
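
The excerpts above and below rely on short aliases and helper modules that are never shown. The import block below is an assumption inferred from the names used in the snippets (np, th, tc, Variable, utils.accuracy, AverageMeter.AverageMeter), not a verbatim copy of the repository's header:

import numpy as np
import torch as th
import torch.cuda as tc
from torch.autograd import Variable

import utils          # assumed to provide utils.accuracy(scores, labels, topk=...)
import AverageMeter   # assumed to provide the AverageMeter.AverageMeter() running-average class
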
Example #2
        if full_np_scores is None:
            full_np_scores = val_np_scores
            rectified_full_np_scores = rectified_val_np_scores
        else:
            full_np_scores = np.vstack((full_np_scores, val_np_scores))
            rectified_full_np_scores = np.vstack(
                (rectified_full_np_scores, rectified_val_np_scores))

        examples_counter += 1

        if examples_counter == batch_size:
            full_labels = th.from_numpy(np.array(full_labels, dtype=int))
            full_np_scores = th.from_numpy(full_np_scores)
            rectified_full_np_scores = th.from_numpy(rectified_full_np_scores)
            #compute accuracy
            prec1, prec5 = utils.accuracy(full_np_scores,
                                          full_labels,
                                          topk=(1, min(5, batch_number * P)))
            rectified_prec1, rectified_prec5 = utils.accuracy(
                rectified_full_np_scores,
                full_labels,
                topk=(1, min(5, batch_number * P)))
            top1.update(prec1.item(), examples_counter)
            top5.update(prec5.item(), examples_counter)
            rectified_top1.update(rectified_prec1.item(), examples_counter)
            rectified_top5.update(rectified_prec5.item(), examples_counter)
            #re-init
            full_np_scores = None
            rectified_full_np_scores = None
            full_labels = []
            examples_counter = 0
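
Every example calls utils.accuracy(scores, labels, topk=...) and expects one precision value (in percent) per requested k. The repository's helper is not shown; the sketch below is a minimal implementation consistent with those calls, modelled on the widely used top-k accuracy routine from the PyTorch ImageNet example:

import torch as th

def accuracy(output, target, topk=(1,)):
    # Return precision@k (in percent) for each k in topk, assuming output holds one
    # row of class scores per example and target holds integer class labels.
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)      # indices of the maxk highest scores
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

The topk=(1, min(5, b * P)) pattern seen in the snippets caps k at the number of classes seen so far (P classes per incremental batch, b batches), so top-5 accuracy stays well defined when fewer than five classes exist.
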
Example #3
                if (i + 1) % iter_size == 0:
                    optimizer.step()
                    optimizer.zero_grad()

            scheduler.step(loss.cpu().data.numpy())

            #Model evaluation
            model.eval()

            for data in val_loader:
                inputs, labels = data
                if tc.is_available():
                    inputs, labels = inputs.cuda(gpu), labels.cuda(gpu)
                outputs = model(Variable(inputs))
                prec1, prec5 = utils.accuracy(outputs.data,
                                              labels,
                                              topk=(1, 5))
                top1.update(prec1.item(), inputs.size(0))
                top5.update(prec5.item(), inputs.size(0))

            current_elapsed_time = time.time() - starting_time
            print(
                '{:03}/{:03} | {} | Train : loss = {:.4f} | Val : acc@1 = {}% ; acc@5 = {}%'
                .format(epoch + 1, num_epochs,
                        timedelta(seconds=round(current_elapsed_time)),
                        running_loss / nb_batches, top1.avg, top5.avg))

            # Saving model
            if saving_intermediate_models:
                state = {
                    'state_dict': model.state_dict(),
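
Example #3's excerpt cuts off while building the checkpoint dictionary, but its training loop illustrates gradient accumulation: gradients from iter_size mini-batches are summed before a single optimizer step, and a ReduceLROnPlateau scheduler is stepped on the epoch loss. The sketch below reproduces that pattern with a dummy model and data; every name here is a placeholder, not the repository's code.

import torch as th
import torch.nn as nn

model = nn.Linear(10, 5)
criterion = nn.CrossEntropyLoss()
train_loader = [(th.randn(4, 10), th.randint(0, 5, (4,))) for _ in range(8)]
iter_size = 2

optimizer = th.optim.SGD(model.parameters(), lr=0.1)
scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)

model.train()
optimizer.zero_grad()
running_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader):
    loss = criterion(model(inputs), labels)
    loss.backward()                       # gradients accumulate across mini-batches
    running_loss += loss.item()
    if (i + 1) % iter_size == 0:          # one optimizer step every iter_size mini-batches
        optimizer.step()
        optimizer.zero_grad()
scheduler.step(running_loss / len(train_loader))  # ReduceLROnPlateau monitors the epoch loss
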
Example #4
            if tc.is_available():
                model = model.cuda(gpu)
            else:
                print("GPU not available")
                sys.exit(-1)

            print('Validation on Batch 1...')
            model.eval()
            top1 = AverageMeter.AverageMeter()
            top5 = AverageMeter.AverageMeter()
            # Validation on both old and new data
            for data in val_loader:
                inputs, labels = data
                if tc.is_available():
                    inputs, labels = inputs.cuda(gpu), labels.cuda(gpu)
                outputs = model(Variable(inputs))
                prec1, prec5 = utils.accuracy(outputs.data, labels, topk=(1, min(5, P * b)))
                top1.update(prec1.item(), inputs.size(0))
                top5.update(prec5.item(), inputs.size(0))
            # -------------------------------------------
            print('BATCH 1 | Val : acc@1 = {}% ; acc@5 = {}%'.format(top1.avg, top5.avg))
            top1_val_accuracies.append(top1.avg)
            top5_val_accuracies.append(top5.avg)
            print('top1 accuracies so far : ' + str(top1_val_accuracies))
            print('top5 accuracies so far : ' + str(top5_val_accuracies))


        else:
            batch_algo_name = algo_name + '_b' + str(b)
            batch_models_save_dir = os.path.join(models_save_dir, batch_algo_name)
            old_train_file_path = os.path.join(exemplars_dir, str(b) + '_old'+file_names_suffix)
            new_train_file_path = os.path.join(dataset_files_path, 'separated/train/batch' + str(b))
        full_labels.append(val_image_class)
        if full_np_rectified_scores is None:
            full_np_scores = val_np_scores
            full_np_rectified_scores = val_rectified_np_scores
        else:
            full_np_scores = np.vstack((full_np_scores, val_np_scores))
            full_np_rectified_scores = np.vstack((full_np_rectified_scores, val_rectified_np_scores))

        examples_counter += 1

        if examples_counter == batch_size:
            full_labels = th.from_numpy(np.array(full_labels, dtype=int))
            full_np_scores = th.from_numpy(full_np_scores)
            full_np_rectified_scores = th.from_numpy(full_np_rectified_scores)
            #compute accuracy
            prec1, prec5 = utils.accuracy(full_np_scores, full_labels, topk=(1, min(5, P * batch_number)))
            prec1_rectified, prec5_rectified = utils.accuracy(full_np_rectified_scores, full_labels, topk=(1, min(5, P * batch_number)))
            top1.update(prec1.item(), examples_counter)
            top5.update(prec5.item(), examples_counter)
            top1_rectified.update(prec1_rectified.item(), examples_counter)
            top5_rectified.update(prec5_rectified.item(), examples_counter)
            #re-init
            full_np_scores = None
            full_np_rectified_scores = None
            full_labels = []
            examples_counter = 0

    ##############################################################################
    ###########################################
    if full_labels != []:  # examples left over from the last incomplete batch
        full_labels = th.from_numpy(np.array(full_labels, dtype=int))
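
The top1/top5 meters instantiated as AverageMeter.AverageMeter() above are only used through .update(value, n) and .avg. A minimal sketch of such a class, consistent with that usage (the repository's version may track more fields):

class AverageMeter(object):
    """Tracks a running average of a metric, weighted by the number of examples."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n          # weight the batch value by its number of examples
        self.count += n
        self.avg = self.sum / self.count

Passing examples_counter (or inputs.size(0)) as n weights each batch's precision by the number of examples it contains, so .avg is the example-level average rather than a plain mean over batches.
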
Example #6
        if full_np_scores is None:
            full_np_scores = test_np_scores
            full_np_scail_scores = test_scail_np_scores
        else:
            full_np_scores = np.vstack((full_np_scores, test_np_scores))
            full_np_scail_scores = np.vstack(
                (full_np_scail_scores, test_scail_np_scores))

        examples_counter += 1

        # update accuracy measures for each batch of images
        if examples_counter == batch_size:
            full_labels = th.from_numpy(np.array(full_labels, dtype=int))
            full_np_scores = th.from_numpy(full_np_scores)
            full_np_scail_scores = th.from_numpy(full_np_scail_scores)
            # compute accuracy
            prec5 = utils.accuracy(full_np_scores,
                                   full_labels,
                                   topk=[min(5, b * P)])
            prec5_scail = utils.accuracy(full_np_scail_scores,
                                         full_labels,
                                         topk=[min(5, b * P)])
            top5_ft.update(prec5[0].item(), examples_counter)
            top5_scail.update(prec5_scail[0].item(), examples_counter)
            # reinitialize the scores arrays
            full_np_scores = None
            full_np_scail_scores = None
            full_labels = []
            examples_counter = 0

    # if there are some data left at the end, run a last update of the accuracy measures
    if full_labels != []:
        full_labels = th.from_numpy(np.array(full_labels, dtype=int))
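
Example #6's excerpt ends, like the others, at the leftover-batch conversion. To tie the pieces together, the fragment below is a toy, self-contained run of the shared accumulate-then-score pattern on random data, using the accuracy sketch given after Example #2; nothing in it comes from the repository.

import numpy as np
import torch as th

num_classes, batch_size = 10, 8
full_np_scores, full_labels = None, []
for _ in range(batch_size):
    scores = np.random.rand(1, num_classes)              # one row of class scores per example
    full_labels.append(np.random.randint(num_classes))   # its ground-truth label
    if full_np_scores is None:
        full_np_scores = scores
    else:
        full_np_scores = np.vstack((full_np_scores, scores))

labels_t = th.from_numpy(np.array(full_labels, dtype=int))
scores_t = th.from_numpy(full_np_scores)
prec1, prec5 = accuracy(scores_t, labels_t, topk=(1, 5))  # the sketch above, or utils.accuracy
print('acc@1 = {:.1f}% ; acc@5 = {:.1f}%'.format(prec1.item(), prec5.item()))
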