Example #1
0
def test(net,
         val_data,
         use_cuda,
         calc_weight_count=False,
         calc_flops=False,
         extended_log=False):
    """Evaluate ``net`` on ``val_data`` and log top-1/top-5 error rates.

    Optionally logs the trainable-parameter count and an estimated
    params/FLOPs figure for a fixed 224x224 input.
    """
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()

    start_time = time.time()
    err_top1_val, err_top5_val = validate(
        acc_top1=top1_meter,
        acc_top5=top5_meter,
        net=net,
        val_data=val_data,
        use_cuda=use_cuda)

    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        logging.info('Model: {} trainable parameters'.format(weight_count))

    if calc_flops:
        n_flops, n_params = measure_model(net, 224, 224)
        logging.info('Params: {} ({:.2f}M), FLOPs: {} ({:.2f}M)'.format(
            n_params, n_params / 1e6, n_flops, n_flops / 1e6))

    # The extended log additionally prints the raw, unrounded error values.
    if extended_log:
        template = 'Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'
    else:
        template = 'Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'
    logging.info(template.format(top1=err_top1_val, top5=err_top5_val))

    logging.info('Time cost: {:.4f} sec'.format(time.time() - start_time))
Example #2
0
def test(net,
         val_data,
         use_cuda,
         input_image_size,
         in_channels,
         calc_weight_count=False,
         calc_flops=False,
         calc_flops_only=True,
         extended_log=False):
    """Evaluate ``net`` and/or report parameter and FLOP statistics.

    When ``calc_flops_only`` is set, the (slow) validation pass is
    skipped and only the model-complexity statistics are computed.
    """
    if not calc_flops_only:
        top1_meter = AverageMeter()
        top5_meter = AverageMeter()
        start_time = time.time()
        err_top1_val, err_top5_val = validate(acc_top1=top1_meter,
                                              acc_top5=top5_meter,
                                              net=net,
                                              val_data=val_data,
                                              use_cuda=use_cuda)
        # The extended log additionally prints the raw, unrounded errors.
        if extended_log:
            template = 'Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'
        else:
            template = 'Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'
        logging.info(template.format(top1=err_top1_val, top5=err_top5_val))
        logging.info('Time cost: {:.4f} sec'.format(time.time() - start_time))

    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            # The FLOPs path reports the parameter count itself, so only
            # log it separately when that path is disabled.
            logging.info('Model: {} trainable parameters'.format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(
            net, in_channels, input_image_size)
        # Sanity check: both counting methods must agree (short-circuits
        # when weight_count was never computed above).
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params,
            params_m=num_params / 1e6,
            flops=num_flops,
            flops_m=num_flops / 1e6,
            flops2=num_flops / 2,
            flops2_m=num_flops / 2 / 1e6,
            macs=num_macs,
            macs_m=num_macs / 1e6))
Example #3
0
def evaluate(segmentation_module, loader, cfg, gpu, activations, num_class,
             patch_size, patch_size_padded, class_names, channels, index_test,
             visualize, results_dir, arch_encoder):
    """Evaluate a segmentation model over the test loader.

    Accumulates pixel accuracy, per-class IoU and a confusion matrix for
    both the full padded prediction and its inner (centre) patch, pools
    network activations over constant-label areas for a later UMAP
    embedding, optionally saves per-image predictions, and writes all
    summary artefacts (confusion matrices, activations, a CSV summary)
    to ``results_dir``.
    """
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    acc_meter_patch = AverageMeter()
    intersection_meter_patch = AverageMeter()
    union_meter_patch = AverageMeter()
    time_meter = AverageMeter()

    # initiate confusion matrix
    conf_matrix = np.zeros((num_class, num_class))
    conf_matrix_patch = np.zeros((num_class, num_class))
    # turn on for initialise for umap
    area_activations_mean = np.zeros((len(index_test), 32 // 4 * 32 // 4))
    area_activations_max = np.zeros((len(index_test), 32 // 4 * 32 // 4))
    # FIX: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24 (AttributeError on current NumPy); the builtin int yields the
    # same platform-default integer dtype.
    area_cl = np.zeros((len(index_test), ), dtype=int)
    area_loc = np.zeros((len(index_test), 3), dtype=int)
    j = 0

    segmentation_module.eval()

    pbar = tqdm(total=len(loader))
    for batch_data in loader:

        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        torch.cuda.synchronize()
        tic = time.perf_counter()
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)

            # Sum class scores over every resized version of the image.
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)

                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        torch.cuda.synchronize()
        time_meter.update(time.perf_counter() - tic)

        # calculate accuracy for the full patch and the inner centre patch
        acc, pix = accuracy(pred, seg_label)
        acc_patch, pix_patch = accuracy(
            pred[patch_size:2 * patch_size, patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size, patch_size:2 * patch_size])

        intersection, union = intersectionAndUnion(pred, seg_label, num_class)
        intersection_patch, union_patch = intersectionAndUnion(
            pred[patch_size:2 * patch_size, patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size,
                      patch_size:2 * patch_size], num_class)

        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        acc_meter_patch.update(acc_patch, pix_patch)
        intersection_meter_patch.update(intersection_patch)
        union_meter_patch.update(union_patch)

        conf_matrix = updateConfusionMatrix(conf_matrix, pred, seg_label)

        # update conf matrix patch
        conf_matrix_patch = updateConfusionMatrix(
            conf_matrix_patch, pred[patch_size:2 * patch_size,
                                    patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size, patch_size:2 * patch_size])

        # visualization: save the raw per-image prediction array
        if visualize:
            info = batch_data['info']
            img_name = info.split('/')[-1]
            np.save(os.path.join(results_dir, img_name), pred)

        pbar.update(1)

        # turn on for UMAP: locate a 32x32 constant-label area and pool
        # the stored network activations over it (feature map appears to
        # be downsampled by 4, hence the //4 indexing — TODO confirm).
        row, col, cl = find_constant_area(
            seg_label, 32, patch_size_padded
        )  #TODO patch_size_padded must be patch_size if only inner patch is checked.
        if not (row == 999999):
            # 999999 is the "no constant area found" sentinel.
            activ_mean = np.mean(
                as_numpy(activations.features.squeeze(0).cpu()),
                axis=0,
                keepdims=True)[:, row // 4:row // 4 + 8,
                               col // 4:col // 4 + 8].reshape(1, 8 * 8)
            activ_max = np.max(as_numpy(activations.features.squeeze(0).cpu()),
                               axis=0,
                               keepdims=True)[:, row // 4:row // 4 + 8,
                                              col // 4:col // 4 + 8].reshape(
                                                  1, 8 * 8)

            area_activations_mean[j] = activ_mean
            area_activations_max[j] = activ_max
            area_cl[j] = cl
            area_loc[j, 0] = row
            area_loc[j, 1] = col
            area_loc[j, 2] = int(batch_data['info'].split('.')[0])
            j += 1
        else:
            # No constant area: store NaN activations and the sentinel class.
            area_activations_mean[j] = np.full((1, 64),
                                               np.nan,
                                               dtype=np.float32)
            area_activations_max[j] = np.full((1, 64),
                                              np.nan,
                                              dtype=np.float32)
            area_cl[j] = 999999
            area_loc[j, 0] = row
            area_loc[j, 1] = col
            area_loc[j, 2] = int(batch_data['info'].split('.')[0])
            j += 1

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))
    iou_patch = intersection_meter_patch.sum / (union_meter_patch.sum + 1e-10)
    for i, _iou_patch in enumerate(iou_patch):
        print('class [{}], patch IoU: {:.4f}'.format(i, _iou_patch))

    print('[Eval Summary]:')
    print(
        'Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'.format(
            iou.mean(),
            acc_meter.average() * 100, time_meter.average()))
    print(
        'Patch: Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'.
        format(iou_patch.mean(),
               acc_meter_patch.average() * 100, time_meter.average()))

    print('Confusion matrix:')
    plot_confusion_matrix(conf_matrix,
                          class_names,
                          normalize=True,
                          title='confusion matrix patch+padding',
                          cmap=plt.cm.Blues)
    plot_confusion_matrix(conf_matrix_patch,
                          class_names,
                          normalize=True,
                          title='confusion matrix patch',
                          cmap=plt.cm.Blues)

    np.save(os.path.join(results_dir, 'confmatrix.npy'), conf_matrix)
    np.save(os.path.join(results_dir, 'confmatrix_patch.npy'),
            conf_matrix_patch)
    # turn on for UMAP
    np.save(os.path.join(results_dir, 'activations_mean.npy'),
            area_activations_mean)
    np.save(os.path.join(results_dir, 'activations_max.npy'),
            area_activations_max)
    np.save(os.path.join(results_dir, 'activations_labels.npy'), area_cl)
    np.save(os.path.join(results_dir, 'activations_loc.npy'), area_loc)

    mcc = compute_mcc(conf_matrix)
    mcc_patch = compute_mcc(conf_matrix_patch)
    # save summary of results in csv
    summary = pd.DataFrame([[
        arch_encoder, patch_size, channels,
        acc_meter.average(),
        acc_meter_patch.average(),
        iou.mean(),
        iou_patch.mean(), mcc, mcc_patch
    ]],
                           columns=[
                               'model', 'patch_size', 'channels',
                               'test_accuracy', 'test_accuracy_patch',
                               'meanIoU', 'meanIoU_patch', 'mcc', 'mcc_patch'
                           ])
    summary.to_csv(os.path.join(results_dir, 'summary_results.csv'))
Example #4
0
def train_net(batch_size, num_epochs, start_epoch1, train_data, val_data, net,
              optimizer, lr_scheduler, lp_saver, log_interval, use_cuda):
    """Train ``net`` for ``num_epochs`` epochs, validating after each one.

    ``start_epoch1`` is 1-based; when it is greater than 1, an initial
    validation pass is run before resuming training. Checkpointing is
    delegated to ``lp_saver`` when one is provided.
    """
    acc_top1 = AverageMeter()
    acc_top5 = AverageMeter()

    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        criterion = criterion.cuda()

    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # Resuming: report the metrics of the restored weights first.
        logging.info('Start training from [Epoch {}]'.format(start_epoch1))
        err_top1_val, err_top5_val = validate(
            acc_top1=acc_top1,
            acc_top5=acc_top5,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        logging.info(
            '[Epoch {}] validation: err-top1={:.4f}\terr-top5={:.4f}'.format(
                start_epoch1 - 1, err_top1_val, err_top5_val))

    global_tic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        # NOTE: scheduler is stepped at the start of each epoch here.
        lr_scheduler.step()

        err_top1_train, train_loss = train_epoch(
            epoch,
            acc_top1,
            net,
            train_data,
            use_cuda,
            criterion,
            optimizer,
            batch_size,
            log_interval)

        err_top1_val, err_top5_val = validate(
            acc_top1=acc_top1,
            acc_top5=acc_top5,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)

        logging.info(
            '[Epoch {}] validation: err-top1={:.4f}\terr-top5={:.4f}'.format(
                epoch + 1, err_top1_val, err_top5_val))

        if lp_saver is not None:
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=[err_top1_val, err_top1_train, err_top5_val, train_loss],
                state=checkpoint)

    logging.info('Total time cost: {:.2f} sec'.format(time.time() - global_tic))
    if lp_saver is not None:
        logging.info('Best err-top5: {:.4f} at {} epoch'.format(
            lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
Example #5
0
def train_net(batch_size, num_epochs, start_epoch1, train_data, val_data, net,
              optimizer, lr_scheduler, lp_saver, log_interval, use_cuda):
    """Train ``net`` tracking a single error metric via ``validate1``.

    Variant of the training loop that records one validation error per
    epoch (instead of top-1/top-5) and also passes the current learning
    rate to the checkpoint saver. ``start_epoch1`` is 1-based.
    """
    acc_metric_val = AverageMeter()
    acc_metric_train = AverageMeter()

    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        criterion = criterion.cuda()

    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # Resuming: report the metric of the restored weights first.
        logging.info('Start training from [Epoch {}]'.format(start_epoch1))
        err_val = validate1(
            accuracy_metric=acc_metric_val,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        logging.info('[Epoch {}] validation: err={:.4f}'.format(
            start_epoch1 - 1, err_val))

    global_tic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        # NOTE: scheduler is stepped at the start of each epoch here.
        lr_scheduler.step()

        err_train, train_loss = train_epoch(
            epoch,
            acc_metric_train,
            net,
            train_data,
            use_cuda,
            criterion,
            optimizer,
            batch_size,
            log_interval)

        err_val = validate1(
            accuracy_metric=acc_metric_val,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)

        logging.info('[Epoch {}] validation: err={:.4f}'.format(
            epoch + 1, err_val))

        if lp_saver is not None:
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=[
                    err_val, err_train, train_loss,
                    optimizer.param_groups[0]['lr']
                ],
                state=checkpoint)

    logging.info('Total time cost: {:.2f} sec'.format(time.time() - global_tic))
    if lp_saver is not None:
        logging.info('Best err: {:.4f} at {} epoch'.format(
            lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
Example #6
0
def train_epoch(segmentation_module, loader, optimizers, history, epoch, cfg,
                writer, epoch_iters, channels, patch_size, disp_iter,
                lr_encoder, lr_decoder):
    """Run one training epoch of the segmentation module.

    Assembles each batch into dense image/label tensors, performs the
    forward/backward passes, steps every optimizer, logs running
    averages to stdout, ``history`` and TensorBoard.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    ave_total_loss = AverageMeter()
    ave_acc = AverageMeter()

    data_iter = iter(loader)

    # BatchNorm layers stay frozen when cfg['TRAIN']['fix_bn'] is set.
    segmentation_module.train(not cfg['TRAIN']['fix_bn'])

    # main loop
    tic = time.time()
    for i in range(epoch_iters):
        # load a batch of data
        batch_data = next(data_iter)
        data_time.update(time.time() - tic)
        segmentation_module.zero_grad()

        # (Per-iteration learning-rate adjustment is intentionally
        # disabled here to keep the learning rate stable.)

        # Stack the list of samples into dense image / label tensors.
        n_samples = len(batch_data)
        batch_images = torch.zeros(
            n_samples, len(channels), patch_size * 3, patch_size * 3)

        down_rate = cfg['DATASET']['segm_downsampling_rate']
        if down_rate == 0:
            batch_segms = torch.zeros(
                n_samples, patch_size * 3, patch_size * 3).long()
        else:
            batch_segms = torch.zeros(
                n_samples,
                patch_size * 3 // down_rate,
                patch_size * 3 // down_rate).long()

        for j, sample in enumerate(batch_data):
            batch_images[j] = sample['img_data']
            batch_segms[j] = sample['seg_label']

        batch_data = {'img_data': batch_images.cuda(),
                      'seg_label': batch_segms.cuda()}

        # forward pass (loss/accuracy over the full padded patch)
        loss, acc = segmentation_module(batch_data)
        loss = loss.mean()
        acc = acc.mean()

        # Backward
        loss.backward()
        for optimizer in optimizers:
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - tic)
        tic = time.time()

        # update average loss and acc
        ave_total_loss.update(loss.data.item())
        ave_acc.update(acc.data.item() * 100)

        # periodic progress display
        if i % disp_iter == 0:
            print('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '
                  'lr_encoder: {:.6f}, lr_decoder: {:.6f}, '
                  'Accuracy: {:4.2f}, Loss: {:.6f}'
                  .format(epoch, i, epoch_iters,
                          batch_time.average(), data_time.average(),
                          cfg['TRAIN']['running_lr_encoder'],
                          cfg['TRAIN']['running_lr_decoder'],
                          ave_acc.average(), ave_total_loss.average()))

        fractional_epoch = epoch - 1 + 1. * i / epoch_iters
        history['train']['epoch'].append(fractional_epoch)
        history['train']['loss'].append(loss.data.item())
        history['train']['acc'].append(acc.data.item())

    writer.add_scalar('Train/Loss', ave_total_loss.average(), epoch)
    writer.add_scalar('Train/Acc', ave_acc.average(), epoch)
Example #7
0
def validate(segmentation_module, loader, optimizers, history, epoch, cfg,
             writer, val_epoch_iters, channels, patch_size):
    """Run one validation epoch and return the mean loss.

    The module computes loss/accuracy with ``patch_size = patch_size / 4``
    (inner patch after downsampling); running averages are logged to
    stdout, ``history`` and TensorBoard.
    """
    ave_total_loss = AverageMeter()
    ave_acc = AverageMeter()
    time_meter = AverageMeter()

    segmentation_module.eval()

    data_iter = iter(loader)

    # main loop
    tic = time.time()
    for i in range(val_epoch_iters):
        # load a batch of data
        batch_data = next(data_iter)

        # Stack the list of samples into dense image / label tensors.
        n_samples = len(batch_data)
        batch_images = torch.zeros(
            n_samples, len(channels), patch_size * 3, patch_size * 3)

        down_rate = cfg['DATASET']['segm_downsampling_rate']
        if down_rate == 0:
            batch_segms = torch.zeros(
                n_samples, patch_size * 3, patch_size * 3).long()
        else:
            batch_segms = torch.zeros(
                n_samples,
                patch_size * 3 // down_rate,
                patch_size * 3 // down_rate).long()

        for j, sample in enumerate(batch_data):
            batch_images[j] = sample['img_data']
            batch_segms[j] = sample['seg_label']

        batch_data = {'img_data': batch_images.cuda(),
                      'seg_label': batch_segms.cuda()}

        # forward pass (no gradients needed during validation)
        with torch.no_grad():
            loss, acc = segmentation_module(
                batch_data, patch_size=int(patch_size / 4))

        loss = loss.mean()
        acc = acc.mean()

        # update average loss and acc
        ave_total_loss.update(loss.data.item())
        ave_acc.update(acc.data.item() * 100)

        # measure elapsed time
        time_meter.update(time.time() - tic)
        tic = time.time()

        fractional_epoch = epoch - 1 + 1. * i / val_epoch_iters
        history['val']['epoch'].append(fractional_epoch)
        history['val']['loss'].append(loss.data.item())
        history['val']['acc'].append(acc.data.item())

    print('Epoch: [{}], Time: {:.2f}, '
          'Val_Accuracy: {:4.2f}, Val_Loss: {:.6f}'
          .format(epoch, time_meter.average(),
                  ave_acc.average(), ave_total_loss.average()))
    writer.add_scalar('Val/Loss', ave_total_loss.average(), epoch)
    writer.add_scalar('Val/Acc', ave_acc.average(), epoch)

    return ave_total_loss.average()