import os
import time
from os.path import join

import cv2
import numpy as np
import torch
import tqdm
from skimage.measure import label, regionprops

# CONFIGS, logger, reverse_mapping, get_boundary_point and visulize_mapping
# are assumed to be provided by the surrounding project.

def test(test_loader, model, args):
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        bar = tqdm.tqdm(test_loader)
        iter_num = len(test_loader.dataset)
        ftime = 0
        ntime = 0
        for i, data in enumerate(bar):
            t = time.time()
            images, names, img_size = data

            images = images.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])
            size = (img_size[0].item(), img_size[1].item())        
            key_points = model(images)
            key_points = torch.sigmoid(key_points)
            ftime += (time.time() - t)
            t = time.time()
            visualize_save_path = os.path.join(CONFIGS["MISC"]["TMP"], 'visualize_test')
            os.makedirs(visualize_save_path, exist_ok=True)

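            # binarize the predicted Hough-space map, then take the centroid
            # of each connected component as one detected (angle, rho) cell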
            binary_kmap = key_points.squeeze().cpu().numpy() > CONFIGS['MODEL']['THRESHOLD']
            kmap_label = label(binary_kmap, connectivity=1)
            props = regionprops(kmap_label)
            plist = []
            for prop in props:
                plist.append(prop.centroid)

            # reverse_mapping works in the 400x400 network input space; change
            # this size when using a dataset with a different input resolution.
            b_points = reverse_mapping(plist, numAngle=CONFIGS["MODEL"]["NUMANGLE"], numRho=CONFIGS["MODEL"]["NUMRHO"], size=(400, 400))
            scale_w = size[1] / 400
            scale_h = size[0] / 400
            for i in range(len(b_points)):
                y1 = int(np.round(b_points[i][0] * scale_h))
                x1 = int(np.round(b_points[i][1] * scale_w))
                y2 = int(np.round(b_points[i][2] * scale_h))
                x2 = int(np.round(b_points[i][3] * scale_w))
                if x1 == x2:
                    angle = -np.pi / 2
                else:
                    angle = np.arctan((y1-y2) / (x1-x2))
                (x1, y1), (x2, y2) = get_boundary_point(y1, x1, angle, size[0], size[1])
                b_points[i] = (y1, x1, y2, x2)

            
            vis = visulize_mapping(b_points, size, names[0])
            cv2.imwrite(join(visualize_save_path, names[0].split('/')[-1]), vis)
            np_data = np.array(b_points)
            np.save(join(visualize_save_path, names[0].split('/')[-1].split('.')[0]), np_data)
            ntime += (time.time() - t)

    logger.info('forward time for total images: %.6f' % ftime)
    logger.info('post-processing time for total images: %.6f' % ntime)
    return ftime + ntime
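
A hypothetical invocation of the test() function above. LineTestDataset, build_model and the checkpoint path are placeholders for illustration, not names from the original project; args is whatever argparse namespace the caller uses.

from torch.utils.data import DataLoader

test_dataset = LineTestDataset(split='test')                   # placeholder dataset class
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
model = build_model().cuda(device=CONFIGS["TRAIN"]["GPU_ID"])  # placeholder builder
model.load_state_dict(torch.load('checkpoint.pth'))            # placeholder path
total_time = test(test_loader, model, args)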
Example #2
# Requires os, sys and pickle from the standard library; U (the project's
# utility module), logger and the calc_* / get_rama helpers are project-local.
def calc_fetch_or_overwrite(grps, prop_obj, data, A, C, h5):
    """data should be an empty OrderedDict"""
    CALC_TYPE_MAPPING = U.reverse_mapping(
        {(calc_means, 'means'): ['bars', 'grped_bars', 'xy', 'grped_xy', 
                                 'grped_along_var', 'mp_grped_along_var', 'grped_along_var_2prop'],
         (calc_alx, 'alx'): ['alx', 'grped_alx', 'mp_alx'],
         (calc_distr, 'distr'): ['distr', 'grped_distr'],
         (calc_distr_ave, 'distr_ave'): ['grped_distr_ave'],
         (calc_imap, 'imap'): ['imap'],
         (calc_pmf, 'pmf'): ['pmf', 'grped_pmf'],
         (calc_omega_distr, 'omega_distr'): ['grped_omega_distr'],

         # this is a quite specific type of analysis
         (calc_pot_ener_map, 'pot_ener_map'): ['pot_ener_map'],

         # rama cannot be calculated and stored because the result dtype would
         # be object, and their shape would be of different dimensions,
         # e.g. np.array([h, phip, psip]).shape: (10, 10), (10,), (10,)
         (get_rama, 'rama'): ['rama', 'rama_pmf'],
         })

    for c, gk in enumerate(grps):
        if A.v: logger.info('processing Group {0}: {1}'.format(c, gk))
        # ar: array
        pt = None
        for _ in ['plot_type', 'plotmp_type']:
            if hasattr(A, _):
                pt = getattr(A, _) # pt: plot_type or plotmp_type
                if A.v: logger.info('{0} detected'.format(pt))

        try:
            calc_type_func, calc_type = CALC_TYPE_MAPPING[pt]
        except KeyError:
            print('Do not know how to calculate "{0}"'.format(pt))
            sys.exit(255)

        ar_name = '{0}_{1}'.format(calc_type, prop_obj.name)
        ar_where = os.path.join('/', gk)
        ar_whname = os.path.join(ar_where, ar_name)
        prop_dd = U.get_prop_dd(C, prop_obj.name)
        if ar_whname in h5:
            if not A.overwrite:
                if A.v: logger.info('fetching subdata from precalculated result')
                sda = h5.getNode(ar_whname).read()     # sda: subdata
            else:
                if A.v: logger.info('overwriting old subdata with new ones')
                _ = h5.getNode(ar_whname)
                _.remove()
                ar = calc_type_func(h5, gk, grps[gk], prop_obj, prop_dd, A, C)
                if ar.dtype.name == 'object':
                    ar = pickle.dumps(ar)
                h5.createArray(where=ar_where, name=ar_name, object=ar)
                sda = ar
        else:
            if A.v: logger.info('Calculating subdata...')
            ar = calc_type_func(h5, gk, grps[gk], prop_obj, prop_dd, A, C)
            if ar.dtype.name == 'object':
                logger.info('data is of dtype={0}, pickling it'.format(ar.dtype.name))
                # cannot be handled by tables yet, so pickle it first
                ar = pickle.dumps(ar)
            logger.info(ar_where)
            h5.createArray(where=ar_where, name=ar_name, object=ar)

            # An alternative to pickling: store the object in a VLArray. Not
            # very useful because it's an array, which adds unnecessary
            # slicing. Left here for reference. --2013-07-25

            # theobject = h5.createVLArray(where=ar_where, name=ar_name, atom=ObjectAtom())
            # theobject.append(ar)
            sda = ar
        data[gk] = sda
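
CALC_TYPE_MAPPING above relies on U.reverse_mapping inverting a {key: list_of_values} dict into a {value: key} dict, so that CALC_TYPE_MAPPING[pt] yields a (calc_function, calc_type) tuple. A minimal sketch of that assumed behavior (the real helper lives in the project's U module):

def reverse_mapping(mapping):
    # invert {key: [v1, v2, ...]} into {v1: key, v2: key, ...}
    return dict((v, k) for k, vs in mapping.items() for v in vs)

# reverse_mapping({('f', 'means'): ['bars', 'xy']})
# -> {'bars': ('f', 'means'), 'xy': ('f', 'means')}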
Example #3
# Imports as in Example #1; caculate_precision and caculate_recall are
# project-local metric helpers.
def validate(val_loader, model, epoch, writer, args):
    # switch to evaluate mode
    model.eval()
    total_acc = 0.0
    total_loss_hough = 0

    total_precision = np.zeros(99)
    total_recall = np.zeros(99)
    nums_precision = 0
    nums_recall = 0
    with torch.no_grad():
        bar = tqdm.tqdm(val_loader)
        iter_num = len(val_loader.dataset)
        for i, data in enumerate(bar):

            images, hough_space_label8, gt_coords, names = data

            if CONFIGS["TRAIN"]["DATA_PARALLEL"]:
                images = images.cuda()
                hough_space_label8 = hough_space_label8.cuda()
            else:
                images = images.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])
                hough_space_label8 = hough_space_label8.cuda(
                    device=CONFIGS["TRAIN"]["GPU_ID"])

            keypoint_map = model(images)

            hough_space_loss = torch.nn.functional.binary_cross_entropy_with_logits(
                keypoint_map, hough_space_label8)
            writer.add_scalar('val/hough_space_loss', hough_space_loss.item(),
                              epoch * iter_num + i)

            acc = 0  # placeholder; per-image accuracy is not computed here
            total_acc += acc

            loss = hough_space_loss
            if not torch.isnan(loss):
                total_loss_hough += loss.item()
            else:
                logger.info("Warnning: val loss is Nan.")

            key_points = torch.sigmoid(keypoint_map)
            binary_kmap = key_points.squeeze().cpu().numpy(
            ) > CONFIGS['MODEL']['THRESHOLD']
            kmap_label = label(binary_kmap, connectivity=1)
            props = regionprops(kmap_label)
            plist = []
            for prop in props:
                plist.append(prop.centroid)
            b_points = reverse_mapping(plist,
                                       numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                                       numRho=CONFIGS["MODEL"]["NUMRHO"],
                                       size=(400, 400))
            # [[y1, x1, y2, x2], [] ...]
            gt_coords = gt_coords[0].numpy().tolist()
            for i in range(1, 100):
                p, num_p = caculate_precision(b_points,
                                              gt_coords,
                                              thresh=i * 0.01)
                r, num_r = caculate_recall(b_points,
                                           gt_coords,
                                           thresh=i * 0.01)
                total_precision[i - 1] += p
                total_recall[i - 1] += r

            nums_precision += num_p
            nums_recall += num_r

        total_loss_hough = total_loss_hough / iter_num
        # normalize only when the counts are non-zero to avoid division by
        # zero; otherwise the all-zero precision/recall arrays are kept as-is
        if nums_precision > 0:
            total_precision = total_precision / nums_precision
        if nums_recall > 0:
            total_recall /= nums_recall

        writer.add_scalar('val/total_loss_hough', total_loss_hough, epoch)
        writer.add_scalar('val/total_precision', total_precision.mean(), epoch)
        writer.add_scalar('val/total_recall', total_recall.mean(), epoch)
        logger.info('Validation result: ==== Precision: %.5f, Recall: %.5f' %
                    (total_precision.mean(), total_recall.mean()))
        acc = 2 * total_precision * total_recall / (total_precision +
                                                    total_recall + 1e-6)
        logger.info('Validation result: ==== F-score: %.5f' % acc.mean())
        writer.add_scalar('val/f-score', acc.mean(), epoch)
    return acc.mean()
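
For reference, the F-score above is the harmonic mean 2 * P * R / (P + R), computed element-wise over the 99 thresholds, with a small epsilon guarding against division by zero. A tiny numeric illustration with made-up values:

import numpy as np

precision = np.array([0.9, 0.8])
recall = np.array([0.6, 0.8])
f = 2 * precision * recall / (precision + recall + 1e-6)
# f is approximately [0.72, 0.80]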
Example #4
# Jittor variant of test(); requires "import jittor as jt" in addition to the
# imports in Example #1.
def test(test_loader, model, args):
    # switch to evaluate mode
    model.eval()
    bar = tqdm.tqdm(test_loader)
    ftime = 0
    ttime = 0
    ntime = 0
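    # t is set once at the start of each batch; ftime, ttime and ntime
    # accumulate elapsed time at three checkpoints: after the forward pass,
    # after post-processing, and after visualization/saving.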
    for i, data in enumerate(bar):
        t = time.time()
        images, names, size = data
        
        images = jt.array(images)
        # size = (size[0].item(), size[1].item())       
        key_points = model(images)

        if args.dump:
            break

        key_points = key_points.sigmoid()
        ftime += (time.time() - t)

        visualize_save_path = os.path.join(CONFIGS["MISC"]["TMP"], 'visualize_test')
        os.makedirs(visualize_save_path, exist_ok=True)

        binary_kmap = key_points.squeeze(0).squeeze(0).numpy() > CONFIGS['MODEL']['THRESHOLD']
        kmap_label = label(binary_kmap, connectivity=1)
        props = regionprops(kmap_label)
        plist = []
        for prop in props:
            plist.append(prop.centroid)

        size = (size[0][0], size[0][1])
        b_points = reverse_mapping(plist, numAngle=CONFIGS["MODEL"]["NUMANGLE"], numRho=CONFIGS["MODEL"]["NUMRHO"], size=(400, 400))
        scale_w = size[1] / 400
        scale_h = size[0] / 400
        for i in range(len(b_points)):
            y1 = int(np.round(b_points[i][0] * scale_h))
            x1 = int(np.round(b_points[i][1] * scale_w))
            y2 = int(np.round(b_points[i][2] * scale_h))
            x2 = int(np.round(b_points[i][3] * scale_w))
            if x1 == x2:
                angle = -np.pi / 2
            else:
                angle = np.arctan((y1-y2) / (x1-x2))
            (x1, y1), (x2, y2) = get_boundary_point(y1, x1, angle, size[0], size[1])
            b_points[i] = (y1, x1, y2, x2)
        ttime += (time.time() - t)
        
        vis = visulize_mapping(b_points, size, names[0])
        cv2.imwrite(join(visualize_save_path, names[0].split('/')[-1]), vis)
        np_data = np.array(b_points)
        np.save(join(visualize_save_path, names[0].split('/')[-1].split('.')[0]), np_data)

        ntime += (time.time() - t)
    if args.dump:
        return 0
    print('forward fps for total images: %.6f' % (len(test_loader) / ftime))
    print('forward + post-processing fps for total images: %.6f' % (len(test_loader) / ttime))
    print('total fps for total images: %.6f' % (len(test_loader) / ntime))
    return ntime
# Variant of validate() that accumulates TP/FP/FN per threshold directly;
# caculate_tp_fp_fn and edge_align are project-local helpers.
def validate(val_loader, model, epoch, writer, args):
    # switch to evaluate mode
    model.eval()
    total_acc = 0.0
    total_loss_hough = 0

    total_tp = np.zeros(99)
    total_fp = np.zeros(99)
    total_fn = np.zeros(99)

    total_tp_align = np.zeros(99)
    total_fp_align = np.zeros(99)
    total_fn_align = np.zeros(99)

    with torch.no_grad():
        bar = tqdm.tqdm(val_loader)
        iter_num = len(val_loader.dataset)
        for i, data in enumerate(bar):

            images, hough_space_label8, gt_coords, names = data

            if CONFIGS["TRAIN"]["DATA_PARALLEL"]:
                images = images.cuda()
                hough_space_label8 = hough_space_label8.cuda()
            else:
                images = images.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])
                hough_space_label8 = hough_space_label8.cuda(
                    device=CONFIGS["TRAIN"]["GPU_ID"])

            keypoint_map = model(images)

            hough_space_loss = torch.nn.functional.binary_cross_entropy_with_logits(
                keypoint_map, hough_space_label8)
            writer.add_scalar('val/hough_space_loss', hough_space_loss.item(),
                              epoch * iter_num + i)

            acc = 0  # placeholder; per-image accuracy is not computed here
            total_acc += acc

            loss = hough_space_loss
            if not torch.isnan(loss):
                total_loss_hough += loss.item()
            else:
                logger.info("Warnning: val loss is Nan.")

            key_points = torch.sigmoid(keypoint_map)
            binary_kmap = key_points.squeeze().cpu().numpy(
            ) > CONFIGS['MODEL']['THRESHOLD']
            kmap_label = label(binary_kmap, connectivity=1)
            props = regionprops(kmap_label)
            plist = []
            for prop in props:
                plist.append(prop.centroid)
            b_points = reverse_mapping(plist,
                                       numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                                       numRho=CONFIGS["MODEL"]["NUMRHO"],
                                       size=(400, 400))
            # [[y1, x1, y2, x2], [] ...]
            gt_coords = gt_coords[0].tolist()
            for i in range(1, 100):
                tp, fp, fn = caculate_tp_fp_fn(b_points,
                                               gt_coords,
                                               thresh=i * 0.01)
                total_tp[i - 1] += tp
                total_fp[i - 1] += fp
                total_fn[i - 1] += fn

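            # optionally refine each detected line with the project's
            # edge_align helper, then re-evaluate the aligned detections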
            if CONFIGS["MODEL"]["EDGE_ALIGN"]:
                for i in range(len(b_points)):
                    b_points[i] = edge_align(b_points[i], names[0], division=5)

                for i in range(1, 100):
                    tp, fp, fn = caculate_tp_fp_fn(b_points,
                                                   gt_coords,
                                                   thresh=i * 0.01)
                    total_tp_align[i - 1] += tp
                    total_fp_align[i - 1] += fp
                    total_fn_align[i - 1] += fn

        total_loss_hough = total_loss_hough / iter_num

        total_recall = total_tp / (total_tp + total_fn + 1e-8)
        total_precision = total_tp / (total_tp + total_fp + 1e-8)
        f = 2 * total_recall * total_precision / (total_recall +
                                                  total_precision + 1e-8)

        writer.add_scalar('val/total_loss_hough', total_loss_hough, epoch)
        writer.add_scalar('val/total_precision', total_precision.mean(), epoch)
        writer.add_scalar('val/total_recall', total_recall.mean(), epoch)
        logger.info('Validation result: ==== Precision: %.5f, Recall: %.5f' %
                    (total_precision.mean(), total_recall.mean()))
        acc = f.mean()
        logger.info('Validation result: ==== F-measure: %.5f' % acc)
        logger.info('Validation result: ==== F-measure@0.95: %.5f' % f[95 - 1])
        writer.add_scalar('val/f-measure', acc, epoch)
        writer.add_scalar('val/f-measure@0.95', f[95 - 1], epoch)

        if CONFIGS["MODEL"]["EDGE_ALIGN"]:
            total_recall_align = total_tp_align / (total_tp_align +
                                                   total_fn_align + 1e-8)
            total_precision_align = total_tp_align / (total_tp_align +
                                                      total_fp_align + 1e-8)
            f_align = 2 * total_recall_align * total_precision_align / (
                total_recall_align + total_precision_align + 1e-8)
            writer.add_scalar('val/total_precision_align',
                              total_precision_align.mean(), epoch)
            writer.add_scalar('val/total_recall_align',
                              total_recall_align.mean(), epoch)
            logger.info(
                'Validation result (Aligned): ==== Precision: %.5f, Recall: %.5f'
                % (total_precision_align.mean(), total_recall_align.mean()))
            acc = f_align.mean()
            logger.info('Validation result (Aligned): ==== F-measure: %.5f' %
                        acc)
            logger.info(
                'Validation result (Aligned): ==== F-measure@0.95: %.5f' %
                f_align[95 - 1])
            writer.add_scalar('val/f-measure_align', acc, epoch)
            writer.add_scalar('val/f-measure@0.95_align', f_align[95 - 1],
                              epoch)
    return acc
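
The thresholds swept in the loops above are i * 0.01 for i = 1..99, so index 95 - 1 of the per-threshold arrays corresponds to a matching threshold of 0.95, which is why f[95 - 1] is reported as F-measure@0.95:

thresholds = [i * 0.01 for i in range(1, 100)]  # 0.01 .. 0.99
assert round(thresholds[95 - 1], 2) == 0.95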