Example #1
def get_pose_estimation_prediction(pose_model, image, centers, scales,
                                   transform):
    rotation = 0

    # pose estimation transformation
    model_inputs = []
    for center, scale in zip(centers, scales):
        trans = get_affine_transform(center, scale, rotation,
                                     cfg.MODEL.IMAGE_SIZE)
        # Crop smaller image of people
        model_input = cv2.warpAffine(
            image,
            trans,
            (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        # hwc -> 1chw
        model_input = transform(model_input)  #.unsqueeze(0)
        model_inputs.append(model_input)

    # n * 1chw -> nchw
    model_inputs = torch.stack(model_inputs)

    # compute output heatmap
    output = pose_model(model_inputs.to(CTX))
    coords, _ = get_final_preds(cfg,
                                output.cpu().detach().numpy(),
                                np.asarray(centers), np.asarray(scales))

    return coords
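
The `centers` and `scales` consumed above come from a person detector elsewhere in the demo. Below is a standalone sketch of the usual HRNet-style conversion from a detector box to a (center, scale) pair; the corner-pair box format, the 200-pixel scale unit, and the name box_to_center_scale are assumptions based on common HRNet demo code, not taken from this snippet.

import numpy as np

def box_to_center_scale(box, model_image_width, model_image_height, pixel_std=200):
    # box is assumed to be ((x1, y1), (x2, y2)) in image pixels
    (x1, y1), (x2, y2) = box
    box_width, box_height = x2 - x1, y2 - y1

    center = np.array([x1 + box_width * 0.5, y1 + box_height * 0.5],
                      dtype=np.float32)

    # pad the box to the network input's aspect ratio before converting to scale units
    aspect_ratio = model_image_width * 1.0 / model_image_height
    if box_width > aspect_ratio * box_height:
        box_height = box_width / aspect_ratio
    else:
        box_width = box_height * aspect_ratio

    scale = np.array([box_width / pixel_std, box_height / pixel_std],
                     dtype=np.float32) * 1.25  # enlarge slightly for context
    return center, scale

print(box_to_center_scale(((100, 50), (300, 450)), 288, 384))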
Example #2
def get_keypoints_from_bbox(pose_model, image, bbox):
    x1, y1, w, h = bbox
    bbox_input = [[x1, y1, x1 + w, y1 + h]]  # xywh -> xyxy
    inputs, origin_img, center, scale = pre_process(image, bbox_input, scores=1, cfg=config)
    with torch.no_grad():
        # compute output heatmap
        output = pose_model(inputs.cuda())
        # compute coordinate
        preds, maxvals = get_final_preds(
            config, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

    # (N, 17, 3)
    result = np.concatenate((preds, maxvals), -1)
    return result
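
Note the box convention: get_keypoints_from_bbox takes an (x, y, w, h) box and converts it internally to (x1, y1, x2, y2). If a detector returns corner coordinates instead, a tiny conversion such as the following hypothetical helper (not part of the snippet) keeps the two conventions straight.

def xyxy_to_xywh(box):
    # (x1, y1, x2, y2) -> (x, y, w, h), the format get_keypoints_from_bbox expects
    x1, y1, x2, y2 = box
    return [x1, y1, x2 - x1, y2 - y1]

print(xyxy_to_xywh([10, 20, 110, 220]))  # [10, 20, 100, 200]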
Example #3
def get_keypoints(human_model, pose_model, image):
    bboxs, scores = yolo_infrence(image, human_model)
    # bboxs holds the detected person box coordinates
    inputs, origin_img, center, scale = pre_process(image, bboxs, scores, config)

    with torch.no_grad():
        # compute output heatmap
        output = pose_model(inputs.cuda())
        # compute coordinate
        preds, maxvals = get_final_preds(
            config, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

    # (N, 17, 3)
    result = np.concatenate((preds, maxvals), -1)
    return result
Example #4
def get_pose_estimation_prediction(pose_model, image, centers, scales, box,
                                   transform):
    rotation = 0

    # pose estimation transformation
    model_inputs = []
    for center, scale in zip(centers, scales):
        cv2.imwrite('../data/nlos/nlos_result/first_input.jpg', image)
        trans = get_affine_transform(center, scale, rotation,
                                     cfg.MODEL.IMAGE_SIZE)
        # Crop smaller image of people
        model_input = cv2.warpAffine(
            image,
            trans,
            (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        #print('model_input(w/ trans)', model_input.shape)
        img = model_input
        cv2.imwrite('../data/nlos/nlos_result/trans_input.jpg', img)

        #inv_trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE, inv=1)

        # hwc -> 1chw
        model_input = transform(model_input)  # .unsqueeze(0)
        model_inputs.append(model_input)

    # n * 1chw -> nchw
    model_inputs = torch.stack(model_inputs)
    zero_heatmap = torch.cuda.FloatTensor(int(
        cfg.MODEL.HEATMAP_SIZE[0]), int(cfg.MODEL.HEATMAP_SIZE[1])).fill_(0)
    # compute output heatmap
    output = pose_model(model_inputs.to(CTX))

    # using heatmap, get inverse transformed coordinates
    coords, _ = get_final_preds(cfg,
                                output.cpu().detach().numpy(),
                                np.asarray(centers), np.asarray(scales))

    for idx1, mat in enumerate(coords[0]):
        x_coord, y_coord = int(mat[0]), int(mat[1])
        if not (in_box(x_coord, y_coord, box)):
            coords[0][idx1] = [-1, -1]
            output[0][idx1] = zero_heatmap

    return output, coords
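
The helper in_box used above is not shown in any of these snippets. Judging from how it is called, a minimal reconstruction, assuming the box is given as [x1, y1, x2, y2], could be:

def in_box(x, y, box):
    # True if the point (x, y) lies inside the box; the box format is an assumption
    x1, y1, x2, y2 = box
    return x1 <= x <= x2 and y1 <= y <= y2

print(in_box(50, 60, [0, 0, 100, 100]))   # True
print(in_box(150, 60, [0, 0, 100, 100]))  # False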
Example #5
def validate(config, dataset, outputs, targets, ids, output_dir, writer_dict=None):
    losses = AverageMeter()
    acc = AverageMeter()
    num_samples = dataset.num_images
    all_preds = np.zeros(
        (num_samples, config['HEAD']['num_keypoints'], 3),
        dtype=np.float32
    )
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    idx = 0
    for i, (output, target, id) in enumerate(zip(outputs, targets, ids)): # output is a list of batches

        num_images_b = len(output)
        meta = [dataset.augmented_db[str(img_id)] for img_id in id]
        # measure accuracy and record loss
        o = np.array([x.transpose(2, 0, 1) for x in output])
        _, avg_acc, cnt, _ = accuracy(o, np.array([x.transpose(2, 0, 1) for x in target]))
        acc.update(avg_acc, cnt)
        c = np.array([x['center'] for x in meta])
        s = np.array([x['scale'] for x in meta])
        score = np.array([x['score'] for x in meta])
        preds, maxvals = get_final_preds(config, o, c, s)

        all_preds[idx:idx + num_images_b, :, 0:2] = preds[:, :, 0:2]
        all_preds[idx:idx + num_images_b, :, 2:3] = maxvals
        # double check this all_boxes parts
        all_boxes[idx:idx + num_images_b, 0:2] = c[:, 0:2]
        all_boxes[idx:idx + num_images_b, 2:4] = s[:, 0:2]
        all_boxes[idx:idx + num_images_b, 4] = np.prod(s * 200, 1)
        all_boxes[idx:idx + num_images_b, 5] = score
        image_path.extend(np.array([x['image'] for x in meta]))
        idx += num_images_b  # advance the write offset into all_preds / all_boxes

    name_values, perf_indicator = dataset.evaluate(all_preds, output_dir, all_boxes, image_path)

    model_name = config['MODEL']['name']
    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(name_value, model_name)
    else:
        _print_name_value(name_values, model_name)

    # Update logging dictionary with accuracy and loss information
    _update_dict(losses, acc, name_values, writer_dict)

    return name_values, perf_indicator
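
AverageMeter, used throughout these validation loops, is a small running-average helper common to HRNet-style repositories. A sketch consistent with the .update(val, n) / .val / .avg usage above:

class AverageMeter(object):
    """Computes and stores the current value and the running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count if self.count != 0 else 0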
Example #6
def get_pose_estimation_prediction(pose_model, image, centers, scales, box,
                                   transform):
    rotation = 0

    #print("img shape ", image.shape)
    #print("centers ", centers)
    #print("scales ", scales)
    #print(box)
    # pose estimation transformation
    model_inputs = []
    for center, scale in zip(centers, scales):
        trans = get_affine_transform(center, scale, rotation,
                                     cfg.MODEL.IMAGE_SIZE)
        #print("trans", trans)
        # Crop smaller image of people
        model_input = cv2.warpAffine(
            image,
            trans,
            (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        img = model_input
        cv2.imwrite('trans_input.jpg', img)
        # hwc -> 1chw
        model_input = transform(model_input)  #.unsqueeze(0)
        model_inputs.append(model_input)

    # n * 1chw -> nchw
    model_inputs = torch.stack(model_inputs)
    #zero_heatmap = np.zeros((120, 120), dtype=np.float32)
    zero_heatmap = torch.cuda.FloatTensor(120, 120).fill_(0)  # hard-coded; should match cfg.MODEL.HEATMAP_SIZE
    # compute output heatmap
    output = pose_model(model_inputs.to(CTX))
    # decode the heatmap output into image-space coordinates
    coords, _ = get_final_preds(cfg,
                                output.cpu().detach().numpy(),
                                np.asarray(centers), np.asarray(scales))

    for idx1, mat in enumerate(coords[0]):
        x_coord, y_coord = int(mat[0]), int(mat[1])
        if not (in_box(x_coord, y_coord, box)):
            coords[0][idx1] = [-1, -1]
            output[0][idx1] = zero_heatmap

    return output, coords
Example #7
    def detect_skeleton_on_single_human(self, image, box):
        '''
        input: image read by OpenCV (cv2)
        '''

        data_numpy = image.copy()

        # object detection box
        if box is None:
            box = [0, 0, data_numpy.shape[0], data_numpy.shape[1]]
        c, s = self._box2cs(box, data_numpy.shape[0], data_numpy.shape[1])
        r = 0

        trans = get_affine_transform(c, s, r, config.MODEL.IMAGE_SIZE)
        input = cv2.warpAffine(
            data_numpy,
            trans,
            (int(config.MODEL.IMAGE_SIZE[0]), int(config.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        input = self.transform(input).unsqueeze(0)

        with torch.no_grad():
            # compute output heatmap
            output = self.model(input)
            output = output.clone().cpu().numpy()

            # heatmap = output
            # heatmap_hand = heatmap[0][self.target_kps[0]]
            # print(heatmap.shape)
            # for kk in self.target_kps[1:]:
            #     heatmap_hand += heatmap[0][kk]
            # cv2.imshow('skeletons', heatmap_hand)
            # cv2.waitKey()

            # compute coordinate
            preds, maxvals = get_final_preds(config, output, np.asarray([c]),
                                             np.asarray([s]))

            return preds[0]
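
self._box2cs above converts an (x, y, w, h) box into the (center, scale) pair that get_affine_transform expects. Its body is not included in the snippet; a standalone sketch, assuming the usual HRNet conventions (aspect-ratio padding and a 200-pixel scale unit):

import numpy as np

def box2cs(box, image_width, image_height, pixel_std=200):
    x, y, w, h = box[:4]
    center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)

    # pad the box to the model input's aspect ratio
    aspect_ratio = image_width * 1.0 / image_height
    if w > aspect_ratio * h:
        h = w * 1.0 / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio

    scale = np.array([w / pixel_std, h / pixel_std], dtype=np.float32) * 1.25
    return center, scale

print(box2cs([100, 50, 200, 400], 288, 384))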
Example #8
def validate(config, loader, dataset, model, criterion, output_dir,
             writer_dict=None, **kwargs):
    model.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()

    nview = len(config.SELECTED_VIEWS)
    nsamples = len(dataset) * nview
    njoints = config.NETWORK.NUM_JOINTS
    height = int(config.NETWORK.HEATMAP_SIZE[0])
    width = int(config.NETWORK.HEATMAP_SIZE[1])
    all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)
    all_heatmaps = np.zeros(
        (nsamples, njoints, height, width), dtype=np.float32)

    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input_, target_, weight_, meta_) in enumerate(loader):
            batch = input_.shape[0]
            output, extra = model(input_, **meta_)

            input = merge_first_two_dims(input_)
            target = merge_first_two_dims(target_)
            weight = merge_first_two_dims(weight_)
            meta = dict()
            for kk in meta_:
                meta[kk] = merge_first_two_dims(meta_[kk])

            target_cuda = target.cuda()
            weight_cuda = weight.cuda()
            loss = criterion(output, target_cuda, weight_cuda)

            nimgs = input.size()[0]
            losses.update(loss.item(), nimgs)

            _, acc, cnt, pre = accuracy(output.detach().cpu().numpy(), target.detach().cpu().numpy(), thr=0.083)
            avg_acc.update(acc, cnt)

            batch_time.update(time.time() - end)
            end = time.time()

            pred, maxval = get_final_preds(config,
                                           output.clone().cpu().numpy(),
                                           meta['center'],
                                           meta['scale'])

            pred = pred[:, :, 0:2]
            pred = np.concatenate((pred, maxval), axis=2)

            all_preds[idx:idx + nimgs] = pred
            all_heatmaps[idx:idx + nimgs] = output.cpu().numpy()
            # image_only_heatmaps[idx:idx + nimgs] = img_detected.cpu().numpy()
            idx += nimgs

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time,
                          loss=losses, acc=avg_acc)
                logger.info(msg)

                view_name = 'view_{}'.format(0)
                prefix = '{}_{}_{:08}'.format(
                    os.path.join(output_dir, 'validation'), view_name, i)
                meta_for_debug_imgs = dict()
                meta_for_debug_imgs['joints_vis'] = meta['joints_vis']
                meta_for_debug_imgs['joints_2d_transformed'] = meta['joints_2d_transformed']
                save_debug_images(config, input, meta_for_debug_imgs, target,
                                  pre * 4, extra['origin_hms'], prefix)
                if 'fused_hms' in extra:
                    fused_hms = extra['fused_hms']
                    prefix = '{}_{}_{:08}'.format(
                        os.path.join(output_dir, 'fused_hms'), view_name, i)
                    save_debug_heatmaps(config, input, meta_for_debug_imgs, target,
                                      pre * 4, fused_hms, prefix)

        detection_thresholds = [0.075, 0.05, 0.025, 0.0125]  # 150,100,50,25 mm
        perf_indicators = []
        cur_time = time.strftime("%Y-%m-%d-%H-%M", time.gmtime())
        for thresh in detection_thresholds:
            name_value, perf_indicator, per_grouping_detected = dataset.evaluate(all_preds, threshold=thresh)
            perf_indicators.append(perf_indicator)
            names = name_value.keys()
            values = name_value.values()
            num_values = len(name_value)
            _, full_arch_name = get_model_name(config)
            logger.info('Detection Threshold set to {} aka {}mm'.format(thresh, thresh * 2000.0))
            logger.info('| Arch   ' +
                        '  '.join(['| {: <5}'.format(name) for name in names]) + ' |')
            logger.info('|--------' * (num_values + 1) + '|')
            logger.info('| ' + '------ ' +
                        ' '.join(['| {:.4f}'.format(value) for value in values]) +
                        ' |')
            logger.info('| ' + full_arch_name)
            logger.info('Overall Perf on threshold {} is {}\n'.format(thresh, perf_indicator))
            logger.info('\n')
            if per_grouping_detected is not None:
                df = pd.DataFrame(per_grouping_detected)
                save_path = os.path.join(output_dir, 'grouping_detec_rate_{}_{}.csv'.format(thresh, cur_time))
                df.to_csv(save_path)

        # save heatmaps and joint locations
        u2a = dataset.u2a_mapping
        a2u = {v: k for k, v in u2a.items() if v != '*'}
        a = list(a2u.keys())
        u = np.array(list(a2u.values()))

        save_file = config.TEST.HEATMAP_LOCATION_FILE
        file_name = os.path.join(output_dir, save_file)
        file = h5py.File(file_name, 'w')
        file['heatmaps'] = all_heatmaps[:, u, :, :]
        file['locations'] = all_preds[:, u, :]
        file['joint_names_order'] = a
        file.close()

    return perf_indicators[3]  # 25mm as indicator
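
merge_first_two_dims above flattens the (batch, view) dimensions so that multi-view tensors can be processed like a plain batch. A plausible implementation (not shown in the snippet), with a quick shape check:

import torch

def merge_first_two_dims(tensor):
    # (batch, nview, ...) -> (batch * nview, ...)
    return tensor.reshape(-1, *tensor.shape[2:])

x = torch.zeros(2, 4, 17, 64, 64)
print(merge_first_two_dims(x).shape)  # torch.Size([8, 17, 64, 64])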
Example #9
def validate(config, val_loader, val_dataset, model, criterion, output_dir,
             tb_log_dir, writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)  # e.g. 2958
    all_preds = np.zeros(
        (num_samples, config.MODEL.NUM_JOINTS, 3),
        dtype=np.float32
    )
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    imgnums = []
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            # compute output
            outputs = model(input)  # torch.Tensor of shape [64, 16, 64, 64]
            if isinstance(outputs, list):
                # output = outputs[-1]  # use only the last stage's output
                l, c, j, k = 0, 1, 0, 0  # per-stage blending weights
                # output = (l*outputs[0] + c*outputs[1] + j*outputs[2] + k*outputs[3])
                output = (c * outputs[0] + j * outputs[1] + k * outputs[2])
            else:
                output = outputs

            if config.TEST.FLIP_TEST:
                # this part is ugly, because pytorch has not supported negative index
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()  # torch.Size([64, 3, 256, 256])
                outputs_flipped = model(input_flipped)  # torch.Size([64, 16, 64, 64])

                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                # map the flipped heatmaps back to the original orientation
                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    # shift the flipped heatmap one pixel to the right
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]

                # average the original and flipped predictions
                output = (output + output_flipped) * 0.5

            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)  # target_weight encodes joint visibility

            loss = criterion(output, target, target_weight)

            num_images = input.size(0)  # used to weight the running averages
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()  # meta covers only the current batch
            score = meta['score'].numpy()

            preds, maxvals = get_final_preds(
                config, output.clone().cpu().numpy(), c, s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals  # all_preds has shape (num_samples, 16, 3)
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)  # box area: product of (scale * 200) along axis 1
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])  # record the image paths from meta

            idx += num_images

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, acc=acc)
                logger.info(msg)  # len(val_loader) is the total number of batches

                prefix = '{}_{}'.format(
                    os.path.join(output_dir, 'val'), i
                )  # e.g. 'output/mpii/pose_hrnet/w32_256x256_adam_lr1e-3/val_0'
                save_debug_images(config, input, meta, target, pred*4, output,
                                  prefix)

        name_values, perf_indicator = val_dataset.evaluate(
            config, all_preds, output_dir, all_boxes, image_path,
            filenames, imgnums
        )  # dataset-specific evaluation of the accumulated predictions

        model_name = config.MODEL.NAME  # e.g. 'pose_hrnet'
        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(name_value, model_name)  # print the accuracy table to the console
        else:
            _print_name_value(name_values, model_name)

        if writer_dict:  # optional TensorBoard writer dict; may be None
            writer = writer_dict['writer']
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalar(
                'valid_loss',
                losses.avg,
                global_steps
            )
            writer.add_scalar(
                'valid_acc',
                acc.avg,
                global_steps
            )
            if isinstance(name_values, list):
                for name_value in name_values:
                    writer.add_scalars(
                        'valid',
                        dict(name_value),
                        global_steps
                    )
            else:
                writer.add_scalars(
                    'valid',
                    dict(name_values),
                    global_steps
                )
            writer_dict['valid_global_steps'] = global_steps + 1

    return perf_indicator
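
The SHIFT_HEATMAP trick above compensates for the misalignment that horizontal flipping introduces: every column of the flipped heatmap is moved one pixel to the right before averaging. A toy demonstration of the shift (illustrative only):

import torch

hm = torch.arange(16.0).reshape(1, 1, 4, 4)  # fake (N, C, H, W) heatmap
shifted = hm.clone()
shifted[:, :, :, 1:] = hm[:, :, :, 0:-1]     # column j takes the value of column j-1
print(shifted[0, 0])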
Example #10
def get_pose_estimation_prediction(pose_model, image, centers, scales, box, transform):
    rotation = 0

    # print("img shape ", image.shape)
    # print("centers ", centers)
    # print("scales ", scales)
    # print(box)
    # pose estimation transformation
    model_inputs = []
    for center, scale in zip(centers, scales):
        #cv2.imwrite('../data/nlos/nlos_result/first_input.jpg', image)
        trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
        # print("trans", trans)
        # Crop smaller image of people
        model_input = cv2.warpAffine(
            image,
            trans,
            (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        #print('model_input(w/ trans)', model_input.shape)
        #img = model_input
        #cv2.imwrite('../data/nlos/nlos_result/trans_input.jpg', img)

        '''
        inv_trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE, inv=1)
        re_model_input = cv2.warpAffine(
            model_input,
            inv_trans,
            (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
            flags=cv2.INTER_LINEAR)

        print('model_input(w/ inv_trans)', re_model_input.shape)
        img = re_model_input
        cv2.imwrite('../data/nlos/nlos_result/inv_trans_input.jpg', img)
        '''

        # hwc -> 1chw
        model_input = transform(model_input)  # .unsqueeze(0)
        model_inputs.append(model_input)
        #model_input = transform(re_model_input)  # .unsqueeze(0)
        #model_inputs.append(model_input)

    # n * 1chw -> nchw
    model_inputs = torch.stack(model_inputs)
    # zero_heatmap = np.zeros((120, 120), dtype=np.float32)
    zero_heatmap = torch.cuda.FloatTensor(120, 120).fill_(0)  # hard-coded; should match cfg.MODEL.HEATMAP_SIZE
    # compute output heatmap
    output = pose_model(model_inputs.to(CTX))
    # decode the heatmap output into image-space coordinates

    coords, _ = get_final_preds(
        cfg,
        output.cpu().detach().numpy(),
        np.asarray(centers),
        np.asarray(scales))
    #print(coords)
    # Transform back

    #coords, _ = get_max_preds(output.cpu().detach().numpy())
    #print("heatmap?", output.shape)
    for idx1, mat in enumerate(coords[0]):
        x_coord, y_coord = int(mat[0]), int(mat[1])
        if not (in_box(x_coord, y_coord, box)):
            #print("{} {} {}".format(idx1, x_coord, y_coord))
            coords[0][idx1] = [-1, -1]
            output[0][idx1] = zero_heatmap
        #print(coords[0][idx1])

    #output_np = output.cpu().detach().numpy()
    #tmp = np.concatenate((output_np[0][0, :, :].reshape(1, 120, 120), output_np[0][5:, :, :]), axis=0)
    #tmp = tmp.resize(1, 13, 120, 120)
    #for i in range(4):
    #    output[0][i+1] = zero_heatmap
    #print("model_inputs {} output {}".format(model_inputs.shape, output.shape))
    #save_batch_heatmaps(
    #    model_inputs, output, 'test_hm_pred.jpg'
    #    #model_inputs, tmp, 'test_hm_pred.jpg'
    #)

    return output, coords
Example #11
def validate(config,
             val_loader,
             val_dataset,
             model,
             criterion,
             output_dir,
             tb_log_dir,
             writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    filenames_map = {}
    filenames_counter = 0
    imgnums = []
    idx = 0

    use_warping = config['MODEL']['USE_WARPING_TEST']

    ############
    preds_output_dir = config.OUTPUT_DIR + 'keypoint_preds/'
    if config.SAVE_PREDS:
        output_filenames_map_file = preds_output_dir + 'filenames_map.npy'
        if not os.path.exists(preds_output_dir):
            os.makedirs(preds_output_dir)
    ####################

    with torch.no_grad():
        end = time.time()
        if not use_warping:
            for i, (input, target, target_weight,
                    meta) in enumerate(val_loader):

                for ff in range(len(meta['image'])):
                    cur_nm = meta['image'][ff]
                    if not cur_nm in filenames_map:
                        filenames_map[cur_nm] = [filenames_counter]
                    else:
                        filenames_map[cur_nm].append(filenames_counter)
                    filenames_counter += 1

                # compute output
                outputs = model(input)
                if isinstance(outputs, list):
                    output = outputs[-1]
                else:
                    output = outputs

                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

                loss = criterion(output, target, target_weight)

                num_images = input.size(0)
                # measure accuracy and record loss
                losses.update(loss.item(), num_images)
                _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                                 target.cpu().numpy())

                acc.update(avg_acc, cnt)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                score = meta['score'].numpy()

                preds, maxvals = get_final_preds(config,
                                                 output.clone().cpu().numpy(),
                                                 c, s)

                all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                all_preds[idx:idx + num_images, :, 2:3] = maxvals
                # double check this all_boxes parts
                all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                all_boxes[idx:idx + num_images, 5] = score
                image_path.extend(meta['image'])

                idx += num_images

                if i % config.PRINT_FREQ == 0:
                    msg = 'Test: [{0}/{1}]\t' \
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                          'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                              i, len(val_loader), batch_time=batch_time,
                              loss=losses, acc=acc)
                    logger.info(msg)

                    prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)
                    save_debug_images(config, input, meta, target, pred * 4,
                                      output, prefix)

            logger.info('###  Method: {} ###'.format(config.EXPERIMENT_NAME))
            name_values, perf_indicator = val_dataset.evaluate(
                config, all_preds, output_dir, all_boxes, filenames_map,
                filenames, imgnums)

            model_name = config.MODEL.NAME
            if isinstance(name_values, list):
                for name_value in name_values:
                    _print_name_value(name_value, model_name)
            else:
                _print_name_value(name_values, model_name)

            if writer_dict:
                writer = writer_dict['writer']
                global_steps = writer_dict['valid_global_steps']
                writer.add_scalar('valid_loss', losses.avg, global_steps)
                writer.add_scalar('valid_acc', acc.avg, global_steps)
                if isinstance(name_values, list):
                    for name_value in name_values:
                        writer.add_scalars('valid', dict(name_value),
                                           global_steps)
                else:
                    writer.add_scalars('valid', dict(name_values),
                                       global_steps)
                writer_dict['valid_global_steps'] = global_steps + 1

        else:  ### PoseWarper
            for i, (input, input_prev1, input_prev2, input_next1, input_next2,
                    target, target_weight, meta) in enumerate(val_loader):

                for ff in range(len(meta['image'])):
                    cur_nm = meta['image'][ff]
                    if not cur_nm in filenames_map:
                        filenames_map[cur_nm] = [filenames_counter]
                    else:
                        filenames_map[cur_nm].append(filenames_counter)
                    filenames_counter += 1

                ###################
                concat_input = torch.cat((input, input_prev1, input_prev2,
                                          input_next1, input_next2), 1)
                outputs = model(concat_input)
                if isinstance(outputs, list):
                    output = outputs[-1]
                else:
                    output = outputs

                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

                num_images = input.size(0)

                loss = criterion(output, target, target_weight)

                # measure accuracy and record loss
                losses.update(loss.item(), num_images)
                _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                                 target.cpu().numpy())
                #######################

                acc.update(avg_acc, cnt)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                score = meta['score'].numpy()

                preds, maxvals = get_final_preds(config,
                                                 output.clone().cpu().numpy(),
                                                 c, s)

                all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                all_preds[idx:idx + num_images, :, 2:3] = maxvals
                # double check this all_boxes parts
                all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                all_boxes[idx:idx + num_images, 5] = score
                image_path.extend(meta['image'])

                idx += num_images

                #### Debug ##########
                #name_values, perf_indicator = val_dataset.evaluate(config, all_preds, output_dir, all_boxes, filenames_map, filenames, imgnums)
                #print(xy)
                #################

                if i % config.PRINT_FREQ == 0:
                    msg = 'Test: [{0}/{1}]\t' \
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                          'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                              i, len(val_loader), batch_time=batch_time,
                              loss=losses, acc=acc)
                    logger.info(msg)

                    prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)
                    save_debug_images(config, input, meta, target, pred * 4,
                                      output, prefix)

            #################################
            if config.SAVE_PREDS:
                print('Saving preds...')
                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_keypoints.h5'
                print(output_path)
                hf = h5py.File(output_path, 'w')
                hf.create_dataset('data', data=all_preds)
                hf.close()

                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_boxes.h5'
                hf = h5py.File(output_path, 'w')
                hf.create_dataset('data', data=all_boxes)
                hf.close()

                np.save(output_filenames_map_file, filenames_map)
            ####################
            #print(xy)

            logger.info('###  Method: {} ###'.format(config.EXPERIMENT_NAME))
            name_values, perf_indicator = val_dataset.evaluate(
                config, all_preds, output_dir, all_boxes, filenames_map,
                filenames, imgnums)

            model_name = config.MODEL.NAME
            if isinstance(name_values, list):
                for name_value in name_values:
                    _print_name_value(name_value, model_name)
            else:
                _print_name_value(name_values, model_name)

            if writer_dict:
                writer = writer_dict['writer']
                global_steps = writer_dict['valid_global_steps']
                writer.add_scalar('valid_loss', losses.avg, global_steps)
                writer.add_scalar('valid_acc', acc.avg, global_steps)
                if isinstance(name_values, list):
                    for name_value in name_values:
                        writer.add_scalars('valid', dict(name_value),
                                           global_steps)
                else:
                    writer.add_scalars('valid', dict(name_values),
                                       global_steps)
                writer_dict['valid_global_steps'] = global_steps + 1

    return perf_indicator
Example #12
def validate(config,
             val_loader,
             val_dataset,
             model,
             criterion,
             output_dir,
             tb_log_dir,
             epoch,
             writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    imgnums = []
    idx = 0

    logger.info(f'# VALIDATE: EPOCH {epoch}')

    model = add_flops_counting_methods(model)
    model.start_flops_count()
    model.eval()

    flops_per_layer = []
    total_per_layer = []

    with torch.no_grad():
        end = time.time()
        val_iter = iter(val_loader)
        num_step = len(val_iter)
        for i in range(num_step):
            input, target, target_weight, meta = next(val_iter)
            input = input.to('cuda', non_blocking=True)

            dynconv_meta = make_dynconv_meta(config, epoch, i)
            outputs, dynconv_meta = model(input, dynconv_meta)

            if 'masks' in dynconv_meta:
                percs, cost, total = dynconv.cost_per_layer(dynconv_meta)
                flops_per_layer.append(cost)
                total_per_layer.append(total)

            output = outputs[-1] if isinstance(outputs, list) else outputs

            # if config.TEST.FLIP_TEST:
            # flip not supported for dynconv
            #     # this part is ugly, because pytorch has not supported negative index
            #     # input_flipped = model(input[:, :, :, ::-1])
            #     input_flipped = np.flip(input.cpu().numpy(), 3).copy()
            #     input_flipped = torch.from_numpy(input_flipped).cuda()
            #     outputs_flipped = model(input_flipped)

            #     if isinstance(outputs_flipped, list):
            #         output_flipped = outputs_flipped[-1]
            #     else:
            #         output_flipped = outputs_flipped

            #     output_flipped = flip_back(output_flipped.cpu().numpy(),
            #                                val_dataset.flip_pairs)
            #     output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

            #     # feature is not aligned, shift flipped heatmap for higher accuracy
            #     if config.TEST.SHIFT_HEATMAP:
            #         output_flipped[:, :, :, 1:] = \
            #             output_flipped.clone()[:, :, :, 0:-1]

            #     output = (output + output_flipped) * 0.5

            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)

            loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())
            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            output_np = output.clone().cpu().numpy()
            preds_rel, maxvals_rel = get_max_preds(output_np)
            preds, maxvals = get_final_preds(config, output_np, c, s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])

            idx += num_images

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, acc=acc)
                logger.info(msg)

                prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)

                save_debug_images(config, input, meta, target, pred * 4,
                                  output, prefix)

            if config.DEBUG.PONDER:
                img = viz.frame2mpl(input[0], denormalize=True)
                img = viz.add_skeleton(img,
                                       preds_rel[0] * 4,
                                       maxvals_rel[0],
                                       thres=0.2)

                plt.figure()
                plt.title('input')
                plt.imshow(img)
                ponder_cost = dynconv.ponder_cost_map(dynconv_meta['masks'])
                if ponder_cost is not None:
                    plt.figure()
                    plt.title('ponder cost map')
                    plt.imshow(ponder_cost,
                               vmin=2,
                               vmax=len(dynconv_meta['masks']) - 2)
                    plt.colorbar()
                else:
                    logger.info('Not a sparse model - no ponder cost')
                viz.showKey()

        name_values, perf_indicator = val_dataset.evaluate(
            config, all_preds, output_dir, all_boxes, image_path, filenames,
            imgnums)

        model_name = config.MODEL.NAME
        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(name_value, model_name)
        else:
            _print_name_value(name_values, model_name)

        if writer_dict:
            writer = writer_dict['writer']
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalar('valid_loss', losses.avg, global_steps)
            writer.add_scalar('valid_acc', acc.avg, global_steps)
            if isinstance(name_values, list):
                for name_value in name_values:
                    writer.add_scalars('valid', dict(name_value), global_steps)
            else:
                writer.add_scalars('valid', dict(name_values), global_steps)
            writer_dict['valid_global_steps'] = global_steps + 1

    avg_flops, total_flops, batch_count = model.compute_average_flops_cost()
    logger.info(
        f'# PARAMS: {get_model_parameters_number(model, as_string=False)/1e6} M'
    )
    logger.info(
        f'# FLOPS (multiply-accumulates, MACs): {(total_flops/idx)/1e9} GMacs on {idx} images'
    )

    # some conditional execution statistics
    if len(flops_per_layer) > 0:
        flops_per_layer = torch.cat(flops_per_layer, dim=0)
        total_per_layer = torch.cat(total_per_layer, dim=0)

        perc_per_layer = flops_per_layer / total_per_layer

        perc_per_layer_avg = perc_per_layer.mean(dim=0)
        perc_per_layer_std = perc_per_layer.std(dim=0)

        s = ''
        for perc in perc_per_layer_avg:
            s += f'{round(float(perc), 2)}, '
        logger.info(
            f'# FLOPS (multiply-accumulates MACs) used percentage per layer (average): {s}'
        )

        s = ''
        for std in perc_per_layer_std:
            s += f'{round(float(std), 2)}, '
        logger.info(
            f'# FLOPS (multiply-accumulates MACs) used percentage per layer (standard deviation): {s}'
        )

        exec_cond_flops = int(torch.sum(flops_per_layer)) / idx
        total_cond_flops = int(torch.sum(total_per_layer)) / idx
        logger.info(
            f'# Conditional FLOPS (multiply-accumulates MACs) over all layers (average per image): {exec_cond_flops/1e9} GMac out of {total_cond_flops/1e9} GMac ({round(100*exec_cond_flops/total_cond_flops,1)}%)'
        )

    return perf_indicator
Example #13
def main():
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=False)
    print(model)
    # config.TEST.MODEL_FILE = "/home/wang/PycharmProjects/tianchi/human-pose-estimation/output/lumbar/lp_net_50/my/checkpoint.pth.tar"

    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        print(final_output_dir)
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # Loading an image
    image_file = args.img_file
    test_set_paths = "../../submit/B_dcm_list.txt"
    save_root = "../../submit/pos_output"
    if not os.path.exists(save_root):
        os.mkdir(save_root)

    with open(test_set_paths) as fin:
        lines = fin.readlines()
        for line in lines:
            img_file = line.strip()
            print(img_file)
            # img_file = "/home/wang/PycharmProjects/tianchi/lumbar_train150/train/study72/image15.dcm"
            # data_numpy = cv2.imread(image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            data_numpy = dicom2array(img_file)
            input = data_numpy.copy()
            input = input[:, :, np.newaxis]
            h, w, _ = input.shape
            input = cv2.resize(input, (512, 512))

            h_sf = (512 / 128) * (h / 512)
            w_sf = 4 * w / 512.0

            # print(input.shape)
            # object detection box
            # need to be given [left_top, w, h]
            # box = [391, 99, 667-391, 524-99]
            # box = [743, 52, 955-743, 500-52]
            # box = [93, 262, 429-93, 595-262]
            # c, s = _box2cs(box, config.MODEL.IMAGE_SIZE[0], config.MODEL.IMAGE_SIZE[1])
            # print(c)
            # r = 0

            # trans = get_affine_transform(c, s, r, config.MODEL.IMAGE_SIZE)
            # print(trans.shape)
            # input = cv2.warpAffine(
            #     data_numpy,
            #     trans,
            #     (int(config.MODEL.IMAGE_SIZE[0]), int(config.MODEL.IMAGE_SIZE[1])),
            #     flags=cv2.INTER_LINEAR)

            transform = transforms.Compose([
                transforms.ToTensor(),
                # transforms.Normalize(mean=[0.485, 0.456, 0.406],
                #                      std=[0.229, 0.224, 0.225]),
            ])

            input = transform(input).unsqueeze(0)
            # switch to evaluate mode
            model.eval()
            fn = os.path.basename(
                os.path.dirname(img_file)) + "_" + os.path.basename(img_file)
            save_path = os.path.join(save_root, fn.replace("dcm", "txt"))
            res_fout = open(save_path, 'w')
            with torch.no_grad():

                # compute output heatmap
                output = model(input)
                # print(output.shape)
                preds, maxvals = get_final_preds(config,
                                                 output.clone().cpu().numpy())

                image = data_numpy.copy()
                if len(preds[0]) != 11:
                    print("point num not right:", line, len(preds[0]))
                for mat in preds[0]:
                    x, y = int(mat[0] * w_sf), int(mat[1] * h_sf)
                    res_fout.write(str(x) + "," + str(y) + "\n")
                    # x *=w_sf
                    # y *=h_sf
                    cv2.circle(image, (x, y), 2, (255, 0, 0), 2)

                # vis result
                # cv2.imwrite("test_lp50.jpg", image)
                cv2.imshow('demo', image)
                # print(fn)
                cv2.imwrite(save_root + "/" + fn.replace("dcm", "jpg"), image)
                cv2.waitKey(10)
                # cv2.destroyAllWindows()
            res_fout.close()
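
The scale factors in main simplify nicely: the image is resized to 512x512 and, with the usual 4x heatmap downsampling, predictions live on a 128x128 grid, so h_sf = (512/128) * (h/512) = h/128 and w_sf = 4w/512 = w/128 map heatmap coordinates straight back to the original image. A quick numeric check (the image size is hypothetical):

h, w = 880, 640  # hypothetical original DICOM size
h_sf = (512 / 128) * (h / 512)
w_sf = 4 * w / 512.0
assert h_sf == h / 128 and w_sf == w / 128
print(h_sf, w_sf)  # 6.875 5.0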
Example #14
def validate(config,
             loader,
             dataset,
             model,
             criterion,
             output_dir,
             writer_dict=None):

    model.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()

    nsamples = len(dataset) * 4
    is_aggre = config.NETWORK.AGGRE
    njoints = config.NETWORK.NUM_JOINTS
    height = int(config.NETWORK.HEATMAP_SIZE[0])
    width = int(config.NETWORK.HEATMAP_SIZE[1])
    all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)
    all_heatmaps = np.zeros((nsamples, njoints, height, width),
                            dtype=np.float32)

    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, weight, meta) in enumerate(loader):
            raw_features, aggre_features = model(input)
            output = routing(raw_features, aggre_features, is_aggre, meta)

            loss = 0
            target_cuda = []
            for t, w, o in zip(target, weight, output):
                t = t.cuda(non_blocking=True)
                w = w.cuda(non_blocking=True)
                target_cuda.append(t)
                loss += criterion(o, t, w)

            if is_aggre:
                for t, w, r in zip(target, weight, raw_features):
                    t = t.cuda(non_blocking=True)
                    w = w.cuda(non_blocking=True)
                    loss += criterion(r, t, w)
            target = target_cuda

            nimgs = len(input) * input[0].size(0)
            losses.update(loss.item(), nimgs)

            nviews = len(output)
            acc = [None] * nviews
            cnt = [None] * nviews
            pre = [None] * nviews
            for j in range(nviews):
                _, acc[j], cnt[j], pre[j] = accuracy(
                    output[j].detach().cpu().numpy(),
                    target[j].detach().cpu().numpy())
            acc = np.mean(acc)
            cnt = np.mean(cnt)
            avg_acc.update(acc, cnt)

            batch_time.update(time.time() - end)
            end = time.time()

            preds = np.zeros((nimgs, njoints, 3), dtype=np.float32)
            heatmaps = np.zeros((nimgs, njoints, height, width),
                                dtype=np.float32)
            for k, o, m in zip(range(nviews), output, meta):
                pred, maxval = get_final_preds(config,
                                               o.clone().cpu().numpy(),
                                               m['center'].numpy(),
                                               m['scale'].numpy())
                pred = pred[:, :, 0:2]
                pred = np.concatenate((pred, maxval), axis=2)
                preds[k::nviews] = pred
                heatmaps[k::nviews] = o.clone().cpu().numpy()

            all_preds[idx:idx + nimgs] = preds
            all_heatmaps[idx:idx + nimgs] = heatmaps
            idx += nimgs

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time,
                          loss=losses, acc=avg_acc)
                logger.info(msg)

                for k in range(len(input)):
                    view_name = 'view_{}'.format(k + 1)
                    prefix = '{}_{}_{:08}'.format(
                        os.path.join(output_dir, 'validation'), view_name, i)
                    save_debug_images(config, input[k], meta[k], target[k],
                                      pre[k] * 4, output[k], prefix)

        # save heatmaps and joint locations
        u2a = dataset.u2a_mapping
        a2u = {v: k for k, v in u2a.items() if v != '*'}
        a = list(a2u.keys())
        u = np.array(list(a2u.values()))

        save_file = config.TEST.HEATMAP_LOCATION_FILE
        file_name = os.path.join(output_dir, save_file)
        file = h5py.File(file_name, 'w')
        file['heatmaps'] = all_heatmaps[:, u, :, :]
        file['locations'] = all_preds[:, u, :]
        file['joint_names_order'] = a
        file.close()

        name_value, perf_indicator = dataset.evaluate(all_preds)
        names = name_value.keys()
        values = name_value.values()
        num_values = len(name_value)
        _, full_arch_name = get_model_name(config)
        logger.info('| Arch ' +
                    ' '.join(['| {}'.format(name) for name in names]) + ' |')
        logger.info('|---' * (num_values + 1) + '|')
        logger.info('| ' + full_arch_name + ' ' +
                    ' '.join(['| {:.3f}'.format(value)
                              for value in values]) + ' |')

    return perf_indicator
Example #15
def validate(config, device, val_loader, val_dataset, model, criterion, output_dir,
             tb_log_dir, writer_dict=None):
    """
    valid data를 모델에 넣어 모델을 평가합니다.

    Parameters
    ----------
    config : yacs.config.CfgNode
        config 파일입니다.
    device : torch.device
        GPU 사용시 데이터를 GPU에 넣어주는 객체입니다.
    val_loader : torch.utils.data.dataloader.DataLoader
        validation data Loader.
    val_dataset : dataset.dataset
        validation dataset.
    model : model
        학습하는 모델 객체입니다.
    criterion : torch.nn.modules.loss
        torch의 loss 객체입니다.
    output_dir : str
        결과값이 저장될 경로입니다.
    tb_log_dir : str
        log 파일 위치입니다.
    writer_dict : dict, optional
        실험 기록 dict입니다. The default is None.

    Returns
    -------
    losses.avg : float
        예측된 heatmap loss의 평균값입니다.

    f_losses.avg : float
        예측된 keypoint loss의 평균값입니다.

    """
    
    
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    f_losses = AverageMeter()
    
    # switch to evaluate mode
    model.eval()
    
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            
            # Move the input and target tensors onto the device.
            input = input.to(device)
            input = input.float()
            target = target.to(device)
            target = target.float()
            
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs
            
            # This option applies flipping at TEST time as well.
            # It defaults to False, so this block is normally skipped.
            if config.TEST.FLIP_TEST:
                input_flipped = input.flip(3)
                outputs_flipped = model(input_flipped)

                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()


                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]

                output = (output + output_flipped) * 0.5
            
            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)
            
            loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())

            acc.update(avg_acc, cnt)
            
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            
            # Get the center and scale from the meta data so the predicted
            # heatmaps can be mapped back to original image coordinates.
            c = meta['center'].numpy()
            s = meta['scale'].numpy()

            # Convert the predicted heatmaps into keypoint coordinates.
            preds, maxvals = get_final_preds(
                config, output.clone().cpu().numpy(), c, s)
            

            criterion2 = torch.nn.MSELoss()

            trues = meta['origin'][:, :, :2]
            trues = trues.reshape(trues.shape[0], -1)

            # Compare the predicted keypoints against the ground-truth keypoints.
            f_loss = criterion2(torch.from_numpy(preds.reshape(preds.shape[0], -1)), trues)
            f_losses.update(f_loss.item(), num_images)
            
            idx += num_images
            
            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, acc=acc)
                logger.info(msg)

                prefix = '{}_{}'.format(
                    os.path.join(output_dir, 'val'), i
                )
                save_debug_images(config, input, meta, target, pred*4, output,
                                  prefix)
        
        if writer_dict:
            writer = writer_dict['writer']
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalar(
                'valid_loss',
                losses.avg,
                global_steps
            )
            
            writer_dict['valid_global_steps'] = global_steps + 1

    # Return the average heatmap loss and the average keypoint loss.
    return losses.avg, f_losses.avg
Example #16
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(
            final_output_dir, 'final_state.pth'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))
    model.eval()
    # Data loading code
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    print(cfg.DATASET.DATASET)
    print(cfg.DATASET.ROOT)
    print(cfg.DATASET.TEST_SET)
    img_sets = img_coco.IMGCOCO(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([transforms.ToTensor(), normalize, ]))
    all_imgids = img_sets.image_set
    with torch.no_grad():
        for idx, imid in enumerate(all_imgids):
            #if idx >= 20:
            #    break
            persons, all_bbs, all_scales, ori_img, imname = img_sets.generate_pose_input(imid)
            all_pts = []
            for pid, person in enumerate(persons):
                outputs = model(person)
                #print(outputs.numpy().shape)
                preds, maxvals = get_final_preds(cfg, outputs.clone().cpu().numpy(), [],[])
                kpts = preds[0,:] * 4
                all_pts.append(kpts)
                #print(kpts)
                #print(kpts.astype(np.int32))
                #draw_kpts(ori_persons[pid], kpts)
                #cv2.imshow('people', person)
                #cv2.waitKey()
            vis_img = draw_kpts(ori_img, all_bbs, all_pts, all_scales)
            os.makedirs('results', exist_ok=True)  # make sure the output folder exists
            out_path = os.path.join('results', imname)
            cv2.imwrite(out_path, vis_img)
            
    
    return  # NOTE: everything below is unreachable; it is the original dataset-evaluation path, kept for reference
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([transforms.ToTensor(),normalize,]))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
    ).cuda()

    # Data loading code
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )

    return  # NOTE: also unreachable; the loader construction and validate() call below are never executed
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True
    )

    # evaluate on validation set
    validate(cfg, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
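
All of these examples funnel their heatmaps through get_final_preds. As a rough mental model, the decoding step looks like the minimal sketch below; this is illustrative only, not the library's actual implementation (which also nudges each peak a quarter pixel toward its strongest neighbour), and decode_heatmaps and its argument layout are assumed names.

import numpy as np

def decode_heatmaps(heatmaps, centers, scales):
    """Illustrative decode: heatmaps (N, J, H, W), centers (N, 2),
    scales (N, 2) in units of 200 px -> coords (N, J, 2), conf (N, J, 1)."""
    n, j, h, w = heatmaps.shape
    flat = heatmaps.reshape(n, j, -1)
    idx = flat.argmax(axis=2)                      # peak location per joint heatmap
    maxvals = flat.max(axis=2, keepdims=True)      # peak value doubles as confidence
    coords = np.stack([idx % w, idx // w], axis=2).astype(np.float32)
    for i in range(n):
        box_wh = np.asarray(scales[i]) * 200.0     # person crop size in pixels
        coords[i, :, 0] = coords[i, :, 0] / w * box_wh[0] + centers[i][0] - box_wh[0] / 2
        coords[i, :, 1] = coords[i, :, 1] / h * box_wh[1] + centers[i][1] - box_wh[1] / 2
    return coords, maxvals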
Example #17
0
def inference(config, image_loader, image_dataset, model, output_dir):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(image_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 5))
    all_image_paths = []
    all_image_ids = []
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(image_loader):
            num_images = input.size(0)
            # compute output
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            if config.TEST.FLIP_TEST:
                # this part is ugly, because pytorch has not supported negative index
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped)
                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           image_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]
                    # output_flipped[:, :, :, 0] = 0

                output = (output + output_flipped) * 0.5

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()
            tlwhs = meta['bbox_tlwh'].numpy()
            output = output.data.cpu()

            preds, maxvals = get_final_preds(config, output.numpy(), c, s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:4] = tlwhs
            all_boxes[idx:idx + num_images, 4] = score
            all_image_paths.extend(meta['image'])
            if config.DATASET.DATASET == 'mot':
                seq_names, frame_ids = meta['image_id']
                frame_ids = frame_ids.numpy().astype(int)
                all_image_ids.extend(list(zip(seq_names, frame_ids)))
            elif config.DATASET.DATASET == 'aifi':
                all_image_ids.extend(meta['image_id'])

            idx += num_images

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
                    i, len(image_loader), batch_time=batch_time)
                logger.info(msg)

                prefix = '{}_{}'.format(os.path.join(output_dir, 'inference'),
                                        i)
                pred, _ = get_max_preds(output.numpy())
                save_debug_images(config, input, meta, target, pred * 4,
                                  output, prefix)

    # write output
    frame_results = defaultdict(list)
    for image_id, pred, box in zip(all_image_ids, all_preds, all_boxes):
        frame_results[image_id].append(
            (pred.astype(float).tolist(), box.astype(float).tolist()))

    final_results = {}
    for image_id, results in frame_results.items():
        keypoints, boxes = zip(*results)
        final_results[image_id] = {'keypoints': keypoints, 'boxes': boxes}

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, 'box_keypoints.json'), 'w') as f:
        json.dump(final_results, f)
    logger.info('Save results to {}'.format(
        os.path.join(output_dir, 'box_keypoints.json')))
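
Example #17's FLIP_TEST branch mirrors the input, predicts again, un-mirrors the heatmaps with flip_back, and averages the two predictions; SHIFT_HEATMAP then shifts the mirrored heatmaps one pixel right because, as the comment says, the flipped features are not perfectly aligned. A minimal sketch of the un-mirroring step, assuming flip_pairs is a list of (left, right) joint index pairs (flip_back_sketch is an illustrative name):

import numpy as np

def flip_back_sketch(hm_flipped, flip_pairs):
    """hm_flipped: (N, J, H, W) heatmaps predicted on the mirrored image."""
    hm = hm_flipped[:, :, :, ::-1].copy()            # mirror back along the x axis
    for left, right in flip_pairs:
        hm[:, [left, right]] = hm[:, [right, left]]  # swap symmetric joints
    return hm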
Example #18
0
def validate(config,
             val_loader,
             val_dataset,
             model,
             criterion,
             output_dir,
             tb_log_dir,
             writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    filenames_map = {}
    filenames_counter = 0
    imgnums = []
    idx = 0

    ############
    preds_output_dir = config.OUTPUT_DIR + 'keypoint_preds/'
    if config.SAVE_PREDS:
        output_filenames_map_file = preds_output_dir + 'filenames_map.npy'
        if not os.path.exists(preds_output_dir):
            os.makedirs(preds_output_dir)
    ####################

    use_warping = config['MODEL']['USE_WARPING_TEST']
    use_gt_input = config['MODEL']['USE_GT_INPUT_TEST']
    warping_reverse = config['MODEL']['WARPING_REVERSE']

    ####################################################
    if config.LOAD_PROPAGATED_GT_PREDS:
        output_path = preds_output_dir + 'propagated_gt_preds.h5'
        hf = h5py.File(output_path, 'r')
        all_preds = np.array(hf.get('data'))
        hf.close()

        output_path = preds_output_dir + 'propagated_gt_boxes.h5'
        hf = h5py.File(output_path, 'r')
        all_boxes = np.array(hf.get('data'))
        hf.close()

        output_path = preds_output_dir + 'filenames_map.npy'
        D = np.load(output_path, allow_pickle=True)
        filenames_map = D.item()

        track_preds = None
        logger.info('########################################')
        logger.info('{}'.format(config.EXPERIMENT_NAME))
        name_values, perf_indicator = val_dataset.evaluate(
            config, all_preds, output_dir, all_boxes, filenames_map,
            track_preds, filenames, imgnums)

        model_name = config.MODEL.NAME
        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(name_value, model_name)
        else:
            _print_name_value(name_values, model_name)

        if writer_dict:
            writer = writer_dict['writer']
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalar('valid_loss', losses.avg, global_steps)
            writer.add_scalar('valid_acc', acc.avg, global_steps)
            if isinstance(name_values, list):
                for name_value in name_values:
                    writer.add_scalars('valid', dict(name_value), global_steps)
            else:
                writer.add_scalars('valid', dict(name_values), global_steps)
            writer_dict['valid_global_steps'] = global_steps + 1

        return perf_indicator
    ###################################

    with torch.no_grad():
        end = time.time()
        if not use_warping:
            for i, (input, target, target_weight,
                    meta) in enumerate(val_loader):

                ########
                for ff in range(len(meta['image'])):
                    cur_nm = meta['image'][ff]

                    if cur_nm not in filenames_map:
                        filenames_map[cur_nm] = [filenames_counter]
                    else:
                        filenames_map[cur_nm].append(filenames_counter)
                    filenames_counter += 1
                #########

                # compute output
                outputs = model(input)
                if isinstance(outputs, list):
                    output = outputs[-1]
                else:
                    output = outputs

                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

                loss = criterion(output, target, target_weight)

                num_images = input.size(0)
                # measure accuracy and record loss
                losses.update(loss.item(), num_images)
                _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                                 target.cpu().numpy())

                acc.update(avg_acc, cnt)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                score = meta['score'].numpy()

                preds, maxvals = get_final_preds(config,
                                                 output.clone().cpu().numpy(),
                                                 c, s)

                all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                all_preds[idx:idx + num_images, :, 2:3] = maxvals
                # double check this all_boxes parts
                all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                all_boxes[idx:idx + num_images, 5] = score
                image_path.extend(meta['image'])

                idx += num_images

                if i % config.PRINT_FREQ == 0:

                    msg = 'Test: [{0}/{1}]\t' \
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                          'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                              i, len(val_loader), batch_time=batch_time,
                              loss=losses, acc=acc)
                    logger.info(msg)

                    prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)
                    save_debug_images(config, input, meta, target, pred * 4,
                                      output, prefix)

            track_preds = None
            logger.info('########################################')
            logger.info('{}'.format(config.EXPERIMENT_NAME))
            name_values, perf_indicator = val_dataset.evaluate(
                config, all_preds, output_dir, all_boxes, filenames_map,
                track_preds, filenames, imgnums)

            model_name = config.MODEL.NAME
            if isinstance(name_values, list):
                for name_value in name_values:
                    _print_name_value(name_value, model_name)
            else:
                _print_name_value(name_values, model_name)

            if writer_dict:
                writer = writer_dict['writer']
                global_steps = writer_dict['valid_global_steps']
                writer.add_scalar('valid_loss', losses.avg, global_steps)
                writer.add_scalar('valid_acc', acc.avg, global_steps)
                if isinstance(name_values, list):
                    for name_value in name_values:
                        writer.add_scalars('valid', dict(name_value),
                                           global_steps)
                else:
                    writer.add_scalars('valid', dict(name_values),
                                       global_steps)
                writer_dict['valid_global_steps'] = global_steps + 1

        else:  ### PoseWarper
            for i, (input, input_sup, target, target_weight,
                    meta) in enumerate(val_loader):

                for ff in range(len(meta['image'])):
                    cur_nm = meta['image'][ff]
                    if cur_nm not in filenames_map:
                        filenames_map[cur_nm] = [filenames_counter]
                    else:
                        filenames_map[cur_nm].append(filenames_counter)
                    filenames_counter += 1

                ### concatenating
                if use_gt_input:
                    target_up_op = nn.Upsample(scale_factor=4, mode='nearest')
                    target_up = target_up_op(target)
                    concat_input = torch.cat((input, input_sup, target_up), 1)
                else:
                    if warping_reverse:
                        target_up_op = nn.Upsample(scale_factor=4,
                                                   mode='nearest')
                        target_up = target_up_op(target)
                        concat_input = torch.cat((input, input_sup, target_up),
                                                 1)
                    else:
                        concat_input = torch.cat((input, input_sup), 1)
                ###########

                if not config.LOAD_PREDS:
                    outputs = model(concat_input)

                    if isinstance(outputs, list):
                        output = outputs[-1]
                    else:
                        output = outputs

                    target = target.cuda(non_blocking=True)
                    target_weight = target_weight.cuda(non_blocking=True)

                num_images = input.size(0)

                if config.LOAD_PREDS:
                    loss = 0.0
                    avg_acc = 0.0
                    cnt = 1
                else:
                    loss = criterion(output, target, target_weight)
                    losses.update(loss.item(), num_images)

                    # measure accuracy and record loss
                    _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                                     target.cpu().numpy())

                acc.update(avg_acc, cnt)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                score = meta['score'].numpy()

                if not config.LOAD_PREDS:
                    preds, maxvals = get_final_preds(
                        config,
                        output.clone().cpu().numpy(), c, s)

                    all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                    all_preds[idx:idx + num_images, :, 2:3] = maxvals
                    # double check this all_boxes parts
                    all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                    all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                    all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                    all_boxes[idx:idx + num_images, 5] = score

                ##############
                image_path.extend(meta['image'])

                idx += num_images

                if i % config.PRINT_FREQ == 0:

                    msg = 'Test: [{0}/{1}]\t' \
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                          'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                              i, len(val_loader), batch_time=batch_time,
                              loss=losses, acc=acc)
                    logger.info(msg)

                    prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)

                    if not config.LOAD_HEATMAPS and not config.LOAD_PREDS:
                        save_debug_images(config, input, meta, target,
                                          pred * 4, output, prefix)

            if config.SAVE_PREDS:
                print('Saving preds...')
                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_keypoints.h5'
                #           output_path = preds_output_dir + 'delta'+str(config.MODEL.TIMESTEP_DELTA)+'_th'+str(config.TEST.IMAGE_THRE)+'_keypoints.h5'
                if config.MODEL.WARPING_REVERSE:
                    output_path = output_path.replace('.h5', '_reverse.h5')

                if config.DATASET.TEST_ON_TRAIN:
                    output_path = output_path.replace('.h5', '_train.h5')

                print(output_path)
                hf = h5py.File(output_path, 'w')
                hf.create_dataset('data', data=all_preds)
                hf.close()

                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_boxes.h5'
                #           output_path = preds_output_dir + 'delta'+str(config.MODEL.TIMESTEP_DELTA)+'_th'+str(config.TEST.IMAGE_THRE)+'_boxes.h5'
                if config.MODEL.WARPING_REVERSE:
                    output_path = output_path.replace('.h5', '_reverse.h5')

                if config.DATASET.TEST_ON_TRAIN:
                    output_path = output_path.replace('.h5', '_train.h5')
                hf = h5py.File(output_path, 'w')
                hf.create_dataset('data', data=all_boxes)
                hf.close()

#           if config.MODEL.TIMESTEP_DELTA == 0:
#             output_filenames_map_file = output_filenames_map_file.replace('.npy','_th'+str(config.TEST.IMAGE_THRE)+'.npy')
#             print(output_filenames_map_file)
#             np.save(output_filenames_map_file, filenames_map)

            if config.LOAD_PREDS:
                #print('Loading preds...')
                sfx = ''  # filename suffix; assumed empty here (the original source defines it elsewhere)
                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_keypoints' + sfx + '.h5'
                hf = h5py.File(output_path, 'r')
                all_preds = np.array(hf.get('data'))
                hf.close()

                output_path = preds_output_dir + 'delta' + str(
                    config.MODEL.TIMESTEP_DELTA) + '_boxes' + sfx + '.h5'
                hf = h5py.File(output_path, 'r')
                all_boxes = np.array(hf.get('data'))
                hf.close()
            ####################

            if config.MODEL.EVALUATE:
                track_preds = None
                logger.info('########################################')
                logger.info('{}'.format(config.EXPERIMENT_NAME))
                name_values, perf_indicator = val_dataset.evaluate(
                    config, all_preds, output_dir, all_boxes, filenames_map,
                    track_preds, filenames, imgnums)

                model_name = config.MODEL.NAME
                if isinstance(name_values, list):
                    for name_value in name_values:
                        _print_name_value(name_value, model_name)
                else:
                    _print_name_value(name_values, model_name)

                if writer_dict:
                    writer = writer_dict['writer']
                    global_steps = writer_dict['valid_global_steps']
                    writer.add_scalar('valid_loss', losses.avg, global_steps)
                    writer.add_scalar('valid_acc', acc.avg, global_steps)
                    if isinstance(name_values, list):
                        for name_value in name_values:
                            writer.add_scalars('valid', dict(name_value),
                                               global_steps)
                    else:
                        writer.add_scalars('valid', dict(name_values),
                                           global_steps)
                    writer_dict['valid_global_steps'] = global_steps + 1
            else:
                perf_indicator = None

    return perf_indicator
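
The SAVE_PREDS / LOAD_PREDS paths in Example #18 are a plain HDF5 round-trip of the prediction arrays. A minimal sketch of that pattern (save_array and load_array are illustrative helper names):

import h5py
import numpy as np

def save_array(path, arr):
    with h5py.File(path, 'w') as hf:
        hf.create_dataset('data', data=arr)

def load_array(path):
    with h5py.File(path, 'r') as hf:
        return np.array(hf['data'])

save_array(output_path, all_preds) followed later by all_preds = load_array(output_path) mirrors the hf.create_dataset / hf.get('data') calls above.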
Example #19
0
def output_preds(config, val_loader, val_dataset, model, criterion,
                 output_dir):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # read the name of each image
    gt_file = os.path.join(config.DATASET.ROOT, 'annot',
                           'label_{}.csv'.format(config.DATASET.TEST_SET))
    image_names = []
    with open(gt_file) as annot_file:
        reader = csv.reader(annot_file, delimiter=',')
        for row in reader:
            image_names.append(row[0])

    # create folder for output heatmaps
    output_heatmap_dir = os.path.join(
        output_dir, 'heatmap_{}'.format(config.DATASET.TEST_SET))
    if not os.path.exists(output_heatmap_dir):
        os.mkdir(output_heatmap_dir)

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            # compute output
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            if config.TEST.FLIP_TEST:
                # this part is ugly, because pytorch has not supported negative index
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped)

                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]

                output = (output + output_flipped) * 0.5

            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)

            loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()

            preds, maxvals = get_final_preds(config,
                                             output.clone().cpu().numpy(), c,
                                             s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals

            batch_image_names = image_names[idx:idx + num_images]
            save_heatmaps(output, batch_image_names, output_heatmap_dir)

            idx += num_images

    # output pose in CSV format
    output_pose_path = os.path.join(
        output_dir, 'pose_{}.csv'.format(config.DATASET.TEST_SET))

    output_pose = open(output_pose_path, 'w')

    for p in range(len(all_preds)):
        output_pose.write("%s," % (image_names[p]))
        for k in range(len(all_preds[p]) - 1):
            output_pose.write(
                "%.3f,%.3f,%.3f," %
                (all_preds[p][k][0], all_preds[p][k][1], all_preds[p][k][2]))
        output_pose.write("%.3f,%.3f,%.3f\n" %
                          (all_preds[p][len(all_preds[p]) - 1][0],
                           all_preds[p][len(all_preds[p]) - 1][1],
                           all_preds[p][len(all_preds[p]) - 1][2]))

    output_pose.close()

    # output segments
    img_seg_size = (64, 64)
    segs = [(5, 15, 16, 17), (5, 6, 12, 15), (6, 10, 11, 12), (23, 33, 34, 35),
            (23, 24, 30, 33), (24, 28, 29, 30), (10, 11, 29, 28),
            (11, 12, 30, 29), (12, 13, 31, 30), (13, 14, 32, 31),
            (14, 15, 33, 32), (15, 16, 34, 33), (16, 17, 35, 34)]

    output_segment_dir = os.path.join(
        output_dir, 'segment_{}'.format(config.DATASET.TEST_SET))
    if not os.path.exists(output_segment_dir):
        os.mkdir(output_segment_dir)

    with open(output_pose_path) as input_pose:
        reader = csv.reader(input_pose, delimiter=',')
        for row in reader:
            img_path = os.path.join(config.DATASET.ROOT, 'images',
                                    'image_' + config.DATASET.TEST_SET, row[0])
            img = cv2.imread(img_path)
            height, width, channels = img.shape

            kpts = []
            for k in range(36):
                kpt = (int(round(float(row[k * 3 + 1]))),
                       int(round(float(row[k * 3 + 2]))))
                kpts.append(kpt)

            output_subdir = os.path.join(output_segment_dir, row[0][:-4])
            if not os.path.exists(output_subdir):
                os.mkdir(output_subdir)

            for s in range(len(segs)):
                img_seg = np.zeros([height, width], dtype=np.uint8)
                kpts_seg = []
                for i in segs[s]:
                    kpts_seg.append([kpts[i][0], kpts[i][1]])

                if is_convex(kpts_seg):
                    kpts_seg = np.array([kpts_seg], dtype=np.int32)
                    cv2.fillPoly(img_seg, kpts_seg, 255)
                    img_seg = cv2.resize(img_seg, img_seg_size)
                else:
                    img_seg = np.zeros(img_seg_size, dtype=np.uint8)

                cv2.imwrite(os.path.join(output_subdir, "%02d.jpg" % s),
                            img_seg)
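
The CSV-writing loop in output_preds builds each row by hand with %-formatting and a special case for the last joint; the standard csv module produces the same file with less bookkeeping. A hedged alternative sketch (write_pose_csv is an illustrative name, not part of the repository):

import csv

def write_pose_csv(path, image_names, all_preds):
    """Write one row per image: name, then x, y, conf for every joint."""
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        for name, joints in zip(image_names, all_preds):
            row = [name]
            for x, y, conf in joints:
                row.extend(['%.3f' % x, '%.3f' % y, '%.3f' % conf])
            writer.writerow(row)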
Example #20
0
def validate_cv(config,
                val_loader,
                val_dataset,
                models,
                criterion,
                output_dir,
                tb_log_dir,
                writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    for m in models:
        m.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    imgnums = []
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            # compute output: average the heatmaps predicted by the ensemble
            ensemble_outputs = []
            for m in models:
                outputs = m(input)
                output = outputs[-1] if isinstance(outputs, list) else outputs
                ensemble_outputs.append(output)
            output = sum(ensemble_outputs) / len(ensemble_outputs)

            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)

            loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            preds, maxvals = get_final_preds(config,
                                             output.clone().cpu().numpy(), c,
                                             s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])

            idx += num_images

            if i % 5 == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, acc=acc)
                logger.info(msg)

                prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)
                save_debug_images(
                    config, input, meta, target,
                    pred * (config.MODEL.IMAGE_SIZE[0] /
                            float(config.MODEL.HEATMAP_SIZE[0])), output,
                    prefix)

        name_values, perf_indicator = val_dataset.evaluate(
            config, all_preds, output_dir, all_boxes, image_path, filenames,
            imgnums)
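
Example #20 rescales debug keypoints with pred * (IMAGE_SIZE[0] / HEATMAP_SIZE[0]) where other examples hard-code pred * 4: with the common 256x192 input and 64x48 heatmaps the ratio is exactly 4. A small sketch of the general scaling, assuming pred is an (N, J, 2) NumPy array (heatmap_to_input_coords is an illustrative name):

def heatmap_to_input_coords(pred, image_size, heatmap_size):
    """Scale heatmap-space keypoints to network-input space."""
    scaled = pred.copy()
    scaled[..., 0] *= image_size[0] / float(heatmap_size[0])
    scaled[..., 1] *= image_size[1] / float(heatmap_size[1])
    return scaled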
Example #21
0
# Standalone snippet: assumes `input` (the cropped person image), `data_numpy`
# (the original frame), `c`/`s` (person center and scale), `model`, and `config`
# are already defined by the surrounding script.
cv2.imshow('image', input)
cv2.waitKey(1000)

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
input = transform(input).unsqueeze(0)

# switch to evaluate mode
model.eval()

with torch.no_grad():
    # compute output heatmap
    output = model(input)

    # compute coordinate
    preds, maxvals = get_final_preds(config,
                                     output.clone().cpu().numpy(),
                                     np.asarray([c]), np.asarray([s]))

    # plot
    image = data_numpy.copy()
    for mat in preds[0]:
        x, y = int(mat[0]), int(mat[1])
        cv2.circle(image, (x, y), 2, (255, 0, 0), 2)

    # vis result
    cv2.imshow('res', image)
    cv2.waitKey(0)
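
The plotting loop above draws every joint regardless of confidence; since get_final_preds also returns maxvals, it is easy to skip low-confidence joints. A small sketch under that idea (draw_keypoints is an illustrative helper; preds[0] is (J, 2) and maxvals[0] is (J, 1)):

import cv2

def draw_keypoints(image, joints, confs, thresh=0.3):
    """Draw each joint whose heatmap peak value exceeds `thresh`."""
    for (x, y), conf in zip(joints, confs):
        if conf.item() >= thresh:
            cv2.circle(image, (int(x), int(y)), 2, (255, 0, 0), 2)
    return image

# e.g. image = draw_keypoints(image, preds[0], maxvals[0])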
Example #22
0
def run_model(
        config,
        dataset,
        loader,
        model,
        criterion_mse,
        criterion_mpjpe,
        final_output_dir,
        tb_writer=None,
        optimizer=None,
        epoch=None,
        is_train=True,
        **kwargs):
    # preparing meters
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()
    mpjpe_meters = None
    detail_mpjpes = None
    detail_preds = None
    detail_preds2d = None
    detail_weights = None

    nviews = len(dataset.selected_cam)
    nsamples = len(dataset) * nviews
    njoints = config.NETWORK.NUM_JOINTS
    n_used_joints = config.DATASET.NUM_USED_JOINTS
    height = int(config.NETWORK.HEATMAP_SIZE[0])
    width = int(config.NETWORK.HEATMAP_SIZE[1])
    all_view_weights = []
    all_maxvs = []
    all_nview_vis_gt = np.zeros((len(dataset), n_used_joints), dtype=int)

    if not is_train:
        do_save_heatmaps = kwargs['save_heatmaps']
        all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)
        all_preds_3d = np.zeros((len(dataset), n_used_joints, 3), dtype=np.float32)
        if do_save_heatmaps:
            all_heatmaps = np.zeros((nsamples, njoints, height, width), dtype=np.float32)
        idx_sample = 0

    if is_train:
        phase = 'train'
        model.train()
        frozen_backbone_bn(model, backbone_name='resnet')  # do not change backbone bn params
    else:
        phase = 'test'
        model.eval()
    with dummy_context_mgr() if is_train else torch.no_grad():
        # if eval then use no_grad context manager
        end = time.time()
        for i, (input_, target_, weight_, meta_) in enumerate(loader):
            data_time.update(time.time() - end)
            debug_bit = False
            batch = input_.shape[0]

            train_2d_backbone = False
            run_view_weight = True

            input = collate_first_two_dims(input_)
            target = collate_first_two_dims(target_)
            weight = collate_first_two_dims(weight_)
            meta = dict()
            for kk in meta_:
                meta[kk] = collate_first_two_dims(meta_[kk])

            extra_params = dict()
            extra_params['run_view_weight'] = run_view_weight
            extra_params['joint_vis'] = weight
            extra_params['run_phase'] = phase

            hms, extra = model(input_, **meta_, **extra_params)  # todo
            output = hms
            origin_hms = extra['origin_hms']
            fused_hms_smax = extra['fused_hms_smax']

            target_cuda = target.cuda(non_blocking=True)
            weight_cuda = weight.cuda(non_blocking=True)
            pose3d_gt = meta_['joints_gt'][:,0,:,:].contiguous().cuda(non_blocking=True)  # (batch, njoint, 3)
            num_total_joints = batch * n_used_joints
            # --- --- forward end here

            joint_2d_loss = extra['joint_2d_loss'].mean()

            # obtain all j3d predictions
            final_preds_name = 'j3d_AdaFuse'
            pred3d = extra[final_preds_name]
            j3d_keys = []
            j2d_keys = []
            for k in extra.keys():
                if 'j3d' in k:
                    j3d_keys.append(k)
                if 'j2d' in k:
                    j2d_keys.append(k)

            # initialize only once
            if mpjpe_meters is None:
                logger.info(j3d_keys)
                mpjpe_meters = dict()
                for k in j3d_keys:
                    mpjpe_meters[k] = AverageMeter()
            if detail_mpjpes is None:
                detail_mpjpes = dict()
                for k in j3d_keys:
                    detail_mpjpes[k] = list()
            if detail_preds is None:
                detail_preds = dict()
                for k in j3d_keys:
                    detail_preds[k] = list()
                detail_preds['joints_gt'] = list()
            if detail_preds2d is None:
                detail_preds2d = dict()
                for k in j2d_keys:
                    detail_preds2d[k] = list()
            if detail_weights is None:
                detail_weights = dict()
                detail_weights['maxv'] = list()
                detail_weights['learn'] = list()

            # save all weights
            maxvs = extra['maxv']  # batch njoint, nview
            for b in range(batch):
                maxvs_tmp = []
                for j in range(n_used_joints):
                    maxv_str = ''.join(['{:.2f}, '.format(v) for v in maxvs[b, j]])
                    maxvs_tmp.append(maxv_str)
                all_maxvs.append(maxvs_tmp)
            view_weight = extra['pred_view_weight']
            for b in range(batch):
                maxvs_tmp = []
                for j in range(n_used_joints):
                    maxv_str = ''.join(['{:.2f}, '.format(v) for v in view_weight[b, j]])
                    maxvs_tmp.append(maxv_str)
                all_view_weights.append(maxvs_tmp)

            nviews_vis = extra['nviews_vis']
            all_nview_vis_gt[i*batch:(i+1)*batch] = nviews_vis.view(batch, n_used_joints).detach().cpu().numpy().astype(int)

            joints_vis_3d = torch.as_tensor(nviews_vis >= 2, dtype=torch.float32).cuda()
            for k in j3d_keys:
                preds = extra[k]
                if config.DATASET.TRAIN_DATASET in ['multiview_h36m']:
                    preds = align_to_pelvis(preds, pose3d_gt, 0)

                avg_mpjpe, detail_mpjpe, n_valid_joints = criterion_mpjpe(preds, pose3d_gt, joints_vis_3d=joints_vis_3d, output_batch_mpjpe=True)
                mpjpe_meters[k].update(avg_mpjpe, n=n_valid_joints)
                detail_mpjpes[k].extend(detail_mpjpe.detach().cpu().numpy().tolist())
                detail_preds[k].extend(preds.detach().cpu().numpy())
            detail_preds['joints_gt'].extend(pose3d_gt.detach().cpu().numpy())

            for k in j2d_keys:
                p2d = extra[k]
                p2d = p2d.permute(0, 1, 3, 2).contiguous()
                p2d = p2d.detach().cpu().numpy()
                detail_preds2d[k].extend(p2d)

            maxv_weight = extra['maxv'].detach().cpu().numpy()
            detail_weights['maxv'].extend(maxv_weight)
            learn_weight = extra['pred_view_weight'].detach().cpu().numpy()
            detail_weights['learn'].extend(learn_weight)

            if is_train:
                loss = 0
                if train_2d_backbone:
                    loss_mse = criterion_mse(hms, target_cuda, weight_cuda)
                    loss += loss_mse
                loss += joint_2d_loss

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses.update(loss.item(), len(input))
            else:
                # validation
                loss = 0
                loss_mse = criterion_mse(hms, target_cuda, weight_cuda)
                loss += loss_mse
                losses.update(loss.item(), len(input))
                nimgs = input.shape[0]

            _, acc, cnt, pre = accuracy(output.detach().cpu().numpy(), target.detach().cpu().numpy(), thr=0.083)
            avg_acc.update(acc, cnt)

            batch_time.update(time.time() - end)
            end = time.time()

            # ---- print logs
            if i % config.PRINT_FREQ == 0 or i == len(loader)-1 or debug_bit:
                gpu_memory_usage = torch.cuda.max_memory_allocated(0)  # bytes
                gpu_memory_usage_gb = gpu_memory_usage / 1.074e9
                mpjpe_log_string = ''
                for k in mpjpe_meters:
                    mpjpe_log_string += '{:.1f}|'.format(mpjpe_meters[k].avg)
                msg = 'Ep:{0}[{1}/{2}]\t' \
                      'Speed {speed:.1f} samples/s\t' \
                      'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                      'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
                      'Acc {acc.val:.3f} ({acc.avg:.3f})\t' \
                      'Memory {memory:.2f}G\t' \
                      'MPJPEs {mpjpe_str}'.format(
                    epoch, i, len(loader), batch_time=batch_time,
                    speed=input.shape[0] / batch_time.val,
                    data_time=data_time, loss=losses, acc=avg_acc, memory=gpu_memory_usage_gb, mpjpe_str=mpjpe_log_string)
                logger.info(msg)

                # ---- save debug images
                view_name = 'view_{}'.format(0)
                prefix = '{}_{}_{:08}'.format(
                    os.path.join(final_output_dir, phase), view_name, i)
                meta_for_debug_imgs = dict()
                meta_for_debug_imgs['joints_vis'] = meta['joints_vis']
                meta_for_debug_imgs['joints_2d_transformed'] = meta['joints_2d_transformed']
                save_debug_images(config, input, meta_for_debug_imgs, target,
                                  pre * 4, origin_hms, prefix)
                # save_debug_images_2(config, input, meta_for_debug_imgs, target,
                #                   pre * 4, output, prefix, suffix='fuse')
                save_debug_images_2(config, input, meta_for_debug_imgs, target,
                                    pre * 0, fused_hms_smax, prefix, suffix='smax', normalize=True, IMG=False)

            if not is_train:
                pred, maxval = get_final_preds(config,
                                               output.clone().cpu().numpy(),
                                               meta['center'],
                                               meta['scale'])
                pred = pred[:, :, 0:2]
                pred = np.concatenate((pred, maxval), axis=2)
                all_preds[idx_sample:idx_sample + nimgs] = pred
                all_preds_3d[i * batch:(i + 1) * batch] = pred3d.detach().cpu().numpy()
                if do_save_heatmaps:
                    all_heatmaps[idx_sample:idx_sample + nimgs] = output.cpu().numpy()
                idx_sample += nimgs
        # -- End epoch

        if not is_train:
            cur_time = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
            # save mpjpes
            for k in detail_mpjpes:
                detail_mpjpe = detail_mpjpes[k]
                out_path = os.path.join(final_output_dir, '{}_ep_{}_mpjpes_{}.csv'.format(cur_time, epoch, k,))
                np.savetxt(out_path, detail_mpjpe, delimiter=',')
                logger.info('MPJPE summary: {} {:.2f}'.format(k, np.array(detail_mpjpe).mean()))

            # save preds pose detail into h5
            pred_path = os.path.join(final_output_dir, '{}_ep_{}_3dpreds.h5'.format(cur_time, epoch))
            pred_file = h5py.File(pred_path, 'w')
            for k in detail_preds:
                pred_file[k] = np.array(detail_preds[k])
            for k in detail_preds2d:
                pred_file[k] = np.array(detail_preds2d[k])
            for k in detail_weights:
                pred_file[k] = np.array(detail_weights[k])
            pred_file.close()

            if do_save_heatmaps:
                # save heatmaps and joint locations
                u2a = dataset.u2a_mapping
                a2u = {v: k for k, v in u2a.items() if v != '*'}
                a = list(a2u.keys())
                u = np.array(list(a2u.values()))

                save_file = config.TEST.HEATMAP_LOCATION_FILE
                file_name = os.path.join(final_output_dir, save_file)
                file = h5py.File(file_name, 'w')
                file['heatmaps'] = all_heatmaps[:, u, :, :]
                file['locations'] = all_preds[:, u, :]
                file['joint_names_order'] = a
                file.close()

            return 0
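
Example #22 evaluates 3D poses with criterion_mpjpe, masked to joints visible in at least two views. The metric itself is just the mean Euclidean distance per visible joint; a minimal sketch under that assumption (mpjpe_sketch is not the repository's implementation):

import torch

def mpjpe_sketch(pred, gt, joints_vis=None):
    """pred, gt: (batch, njoints, 3); joints_vis: optional (batch, njoints) 0/1 mask."""
    dist = torch.norm(pred - gt, dim=-1)            # per-joint Euclidean error
    if joints_vis is None:
        return dist.mean()
    dist = dist * joints_vis
    return dist.sum() / joints_vis.sum().clamp(min=1)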
Example #23
0
def validate(config,
             val_loader,
             val_dataset,
             model,
             criterion,
             output_dir,
             tb_log_dir,
             writer_dict=None):

    ######################### commented out
    # batch_time = AverageMeter()
    # losses = AverageMeter()
    # acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    imgnums = []
    idx = 0

    time_veri1.time_start()
    with torch.no_grad():
        # end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            # compute output
            time_veri1.start_point(time_veri1.t_point0)
            time_veri2.start_point(time_veri2.t_point0)
            gpu_used.gpu_clear()

            # input = input.half()
            outputs = model(input)

            time_veri2.end_point(time_veri2.t_point0)
            time_veri2.start_point(time_veri2.t_point1)

            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            time_veri2.end_point(time_veri2.t_point1)
            time_veri2.start_point(time_veri2.t_point2)

            # if config.TEST.FLIP_TEST:
            #     input_flipped = input.flip(3)
            #     outputs_flipped = model(input_flipped)

            #     if isinstance(outputs_flipped, list):
            #         output_flipped = outputs_flipped[-1]
            #     else:
            #         output_flipped = outputs_flipped

            #     output_flipped = flip_back(output_flipped.cpu().numpy(),
            #                                val_dataset.flip_pairs)
            #     output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

            #     # feature is not aligned, shift flipped heatmap for higher accuracy
            #     if config.TEST.SHIFT_HEATMAP:
            #         output_flipped[:, :, :, 1:] = \
            #             output_flipped.clone()[:, :, :, 0:-1]

            #     output = (output + output_flipped) * 0.5

            time_veri2.end_point(time_veri2.t_point2)
            time_veri2.start_point(time_veri2.t_point3)

            # target = target.cuda(non_blocking=True)
            # target_weight = target_weight.cuda(non_blocking=True)
            # loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            # losses.update(loss.item(), num_images)
            # _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
            #  target.cpu().numpy())

            # acc.update(avg_acc, cnt)

            # measure elapsed time
            # batch_time.update(time.time() - end)
            # end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            time_veri2.end_point(time_veri2.t_point3)
            time_veri2.start_point(time_veri2.t_point4)

            preds, maxvals = get_final_preds(config,
                                             output.clone().cpu().numpy(), c,
                                             s)

            time_veri2.end_point(time_veri2.t_point4)
            time_veri2.start_point(time_veri2.t_point5)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])

            idx += num_images

            # if i % config.PRINT_FREQ == 0:
            #     # msg = 'Test: [{0}/{1}]\t' \
            #     #       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
            #     #       'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
            #     #       'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
            #     #           i, len(val_loader), batch_time=batch_time,
            #     #           loss=losses, acc=acc)

            #     msg = 'Test: [{0}/{1}]\t'.format(i, len(val_loader))
            #     logger.info(msg)

            #     prefix = '{}_{}'.format(
            #         os.path.join(output_dir, 'val'), i)

            ####################################### commented out
            # save_debug_images(config, input, meta, target, pred*4, output, prefix)

            time_veri2.end_point(time_veri2.t_point5)
            time_veri1.end_point(time_veri1.t_point0)
            logger.info("gpu_batch1:{}".format(gpu_used.gpuinfo()))
            gpu_used.gpu_clear()
            logger.info("gpu_batch2:{}".format(gpu_used.gpuinfo()))

        ########################################################################
        # print("compute the evaluation metrics?")

        # name_values, perf_indicator = val_dataset.evaluate(
        #     config, all_preds, output_dir, all_boxes, image_path,
        #     filenames, imgnums
        # )

        # model_name = config.MODEL.NAME
        # if isinstance(name_values, list):
        #     for name_value in name_values:
        #         _print_name_value(name_value, model_name)
        # else:
        #     _print_name_value(name_values, model_name)

        # if writer_dict:
        #     writer = writer_dict['writer']
        #     global_steps = writer_dict['valid_global_steps']
        #     writer.add_scalar(
        #         'valid_loss',
        #         losses.avg,
        #         global_steps
        #     )
        #     writer.add_scalar(
        #         'valid_acc',
        #         acc.avg,
        #         global_steps
        #     )
        #     if isinstance(name_values, list):
        #         for name_value in name_values:
        #             writer.add_scalars(
        #                 'valid',
        #                 dict(name_value),
        #                 global_steps
        #             )
        #     else:
        #         writer.add_scalars(
        #             'valid',
        #             dict(name_values),
        #             global_steps
        #         )
        #     writer_dict['valid_global_steps'] = global_steps + 1

    ##################################### additions ##################################
    time_veri1.time_end()
    # logger.info(config)
    logger.info("log:{}".format("HR-NET処理中断終了"))

    logger.info("for文全体:{}".format(time_veri1.t_end - time_veri1.t_start))
    temp = (time_veri1.t_end - time_veri1.t_start)
    logger.info("for文の中:{}".format(time_veri1.time_sum_list(return_all=True)))
    # logger.info("ミニバッチのロード時間:{}".format(temp - time_veri2.all))
    logger.info("⓪~⑤の処理時間:{}sum:{}".format(time_veri2.time_sum_list(),
                                           time_veri2.all))

    logger.info("for文全体対fps_all:{}".format(1 / (temp / 512)))
    logger.info("for文の中対fps_in:{}".format(1 / (time_veri1.all / 512)))

    logger.info("gpu_propety:{}".format(
        gpu_used.gpu_property(device=0, torch_version=True)))
    logger.info("gpu_exit:{}".format(gpu_used.gpuinfo()))
    logger.info("gpu_max,list:{}{}".format(gpu_used.used_max,
                                           gpu_used.used_list))

    logger.info("コピペ{}".format([
        temp, time_veri1.all,
        time_veri2.time_sum_list()[0],
        time_veri2.time_sum_list()[1],
        time_veri2.time_sum_list()[2],
        time_veri2.time_sum_list()[3],
        time_veri2.time_sum_list()[4],
        time_veri2.time_sum_list()[5]
    ]))
    import sys
    sys.exit()  # stop here on purpose; the evaluation code above was disabled
    ##################################### end of additions ##################################

    return perf_indicator  # unreachable after sys.exit(); perf_indicator is never defined in this variant
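
Example #23 strips validation down to a pure timing harness built on custom time_veri / gpu_used helpers that are not shown here. To make the intent concrete, a minimal stand-in for the timing side (SectionTimer is an assumed, illustrative class, not the original helper):

import time

class SectionTimer:
    """Accumulate wall-clock time for named sections of a loop body."""
    def __init__(self):
        self.totals = {}
        self._starts = {}

    def start(self, name):
        self._starts[name] = time.time()

    def end(self, name):
        elapsed = time.time() - self._starts.pop(name)
        self.totals[name] = self.totals.get(name, 0.0) + elapsed

timer.start('forward'); output = model(input); timer.end('forward') reproduces the start_point/end_point bookkeeping above.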
Example #24
0
def main():
    json_data = {}
    args = parse_args()
    update_config(cfg, args)

    if not args.camera:
        # handle video
        cam = cv2.VideoCapture(args.video_input)
        video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    else:
        cam = cv2.VideoCapture(1)
        video_length = 30

    ret_val, input_image = cam.read()
    # Video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    input_fps = cam.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter(args.video_output, fourcc, input_fps,
                          (input_image.shape[1], input_image.shape[0]))

    #### load pose-hrnet MODEL
    pose_model = model_load(cfg)
    #  pose_model = torch.nn.DataParallel(pose_model, device_ids=[0,1]).cuda()
    pose_model.to(device)

    item = 0
    index = 0
    for i in tqdm(range(video_length - 1)):

        x0 = ckpt_time()
        ret_val, input_image = cam.read()

        #  if args.camera:
        #      # for real-time speed, run prediction on every other frame
        #      if item == 0:
        #          item = 1
        #          continue

        item = 0
        try:
            detections = yolov5_model(input_image)
            # print(detections)
            scores = []
            bboxs = []

            if detections is not None:
                for det in detections.pred:
                    # each detection row is (x1, y1, x2, y2, confidence, class)
                    for bbox in det:
                        if bbox[4] > 0.25 and bbox[5] == 0:  # keep confident 'person' boxes only
                            bboxs.append(bbox[:4])
                            scores.append(bbox[4])
                            # bbox is coordinate location
            # print("boxes", bboxs)
            # print("scores", scores)
            inputs, origin_img, center, scale = PreProcess(
                input_image, bboxs, scores, cfg)

        except Exception:
            # detection or pre-processing failed; write the raw frame and move on
            out.write(input_image)
            cv2.namedWindow("enhanced", 0)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow('enhanced', input_image)
            cv2.waitKey(2)
            continue

        with torch.no_grad():
            # compute output heatmap (reorder channels BGR -> RGB first)
            inputs = inputs[:, [2, 1, 0]]
            output = pose_model(inputs.to(device))
            # print("Output from pose mode", output)
            # compute coordinate
            preds, maxvals = get_final_preds(cfg,
                                             output.clone().cpu().numpy(),
                                             np.asarray(center),
                                             np.asarray(scale))
            json_data[index] = list()
            json_data[index].append(preds.tolist())

            print("Key points", preds)
            index += 1

        image = plot_keypoint(origin_img, preds, maxvals, 0.25)
        out.write(image)
        if args.display:
            ######### full screen
            #  out_win = "output_style_full_screen"
            #  cv2.namedWindow(out_win, cv2.WINDOW_NORMAL)
            #  cv2.setWindowProperty(out_win, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            #  cv2.imshow(out_win, image)

            ########### fixed window size
            cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow('enhanced', image)
            cv2.waitKey(1)

    # write the collected keypoints once, after the frame loop finishes
    with open('outputs/output.json', 'w') as json_file:
        json.dump(json_data, json_file)
Example #25
0
def predict(config, val_loader, val_dataset, model):
    batch_time = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_names = []
    orig_boxes = []

    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, meta) in enumerate(val_loader):
            # compute output
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            if config.TEST.FLIP_TEST:
                # this part is ugly because PyTorch does not support negative-step indexing
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped)

                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]

                output = (output + output_flipped) * 0.5

            num_images = input.size(0)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            preds, maxvals = get_final_preds(config,
                                             output.clone().cpu().numpy(), c,
                                             s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals

            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score

            names = meta['image']
            image_names.extend(names)
            orig_boxes.extend(meta['origbox'])

            idx += num_images

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
                    i, len(val_loader), batch_time=batch_time)
                print(msg)

        return all_preds, all_boxes, image_names, orig_boxes
def main():
    args = parse_args()

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test3d')

    prediction_path = os.path.join(final_output_dir,
                                   config.TEST.HEATMAP_LOCATION_FILE)
    # prediction_path = os.path.join(final_output_dir, 'image_only_heatmaps.h5')
    logger.info(prediction_path)
    test_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False)

    if config.DATASET.TRAIN_DATASET == 'multiview_h36m':
        from multiviews.h36m_body import HumanBody
        from dataset.heatmap_dataset_h36m import HeatmapDataset, no_mix_collate_fn
    elif config.DATASET.TRAIN_DATASET == 'totalcapture':
        from multiviews.totalcapture_body import HumanBody
        from dataset.heatmap_dataset import HeatmapDataset, no_mix_collate_fn

    all_heatmaps = h5py.File(prediction_path, 'r')['heatmaps']
    all_heatmaps = all_heatmaps[()]  # load all heatmaps into RAM to avoid an h5py multi-threading bug
    # with open('/data/extra/zhe/projects/multiview-pose-github/all_heatmaps.pkl', 'rb') as f:  # todo
    #     all_heatmaps = pickle.load(f)

    # pairwise_file = os.path.join(config.DATA_DIR, config.PICT_STRUCT.PAIRWISE_FILE)
    # with open(pairwise_file, 'rb') as f:
    #     pairwise = pickle.load(f)['pairwise_constrain']

    # mp = torch.multiprocessing.get_context('spawn')
    # os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    # gpus = [0]  # todo write in config rather than hard code
    # do_bone_vectors = args.withIMU
    # logger.info('Whether use IMU Bone Orientation: {}'.format(str(do_bone_vectors)))
    # dev = torch.device('cuda:{}'.format(gpus[0]))

    grouping = test_dataset.grouping
    db = test_dataset.db
    mpjpes = []
    body = HumanBody()
    body_joints = []
    for j in body.skeleton:
        body_joints.append(j['name'])
    heatmap_dataset = HeatmapDataset(all_heatmaps, db, grouping, body)
    heatmap_loader = torch.utils.data.DataLoader(heatmap_dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True,
                                                 collate_fn=no_mix_collate_fn)

    # modify df definition if add extra metrics to report
    results_df = pandas.DataFrame(columns=['imgid','subject', 'action', 'subaction', 'mpjpe'] + body_joints)
    # for i, items in tqdm(enumerate(heatmap_loader)):
    for i, items in enumerate(heatmap_loader):
        input_params_all_devices = []
        for item in items:
            # heatmap_loader uses batch_size=1, so items holds a single entry;
            # with a larger batch these bindings would keep only the last item
            heatmaps = item['heatmaps']
            datum = item['datum']
            boxes = item['boxes']
            poses = item['poses']
            cameras = item['cameras']
            limb_length = item['limb_length']
            bone_vectors = item['bone_vectors']

        # preds = []
        # maxvs = []
        nview = heatmaps.shape[0]
        njoints = heatmaps.shape[1]
        # for idv in range(nview):  # nview
        #     hm = heatmaps[idv]
        #     center = boxes[idv]['center']
        #     scale = boxes[idv]['scale']
        #     pred, maxv = get_final_preds(config, hm, center, scale)
        #     preds.append(pred)
        #     maxvs.append(maxv)

        centers = [boxes[i]['center'] for i in range(nview)]
        scales = [boxes[i]['scale'] for i in range(nview)]
        preds, maxvs = get_final_preds(config, heatmaps, centers, scales)

        # obtain joint vis from maxvs by a threshold
        vis_thresh = 0.3
        joints_vis = np.greater(maxvs, vis_thresh)

        # if not np.all(joints_vis):  # for debug
        #     print(maxvs)

        # check if at least two views available for each joints
        valid_views = np.swapaxes(joints_vis, 0, 1).sum(axis=1).reshape(-1)
        # print(valid_views)
        if np.any(valid_views < 2):
            # print(maxvs)
            maxvs_t = np.swapaxes(maxvs, 0, 1).reshape(njoints, nview)  # (njoints, nview)
            sorted_index = np.argsort(maxvs_t, axis=1)
            top2_index = sorted_index[:, ::-1][:, :2]  # descending order, keep the two best views
            top2_vis = np.zeros((njoints, nview), dtype=bool)
            for j in range(njoints):
                for ind_view in top2_index[j]:
                    top2_vis[j, ind_view] = True
            top2_vis_reshape = np.transpose(top2_vis).reshape(nview, njoints, 1)
            joints_vis = np.logical_or(joints_vis, top2_vis_reshape)
            logger.info('idx_{:0>6d} sub_{} act_{} subact_{} has some joints whose valid view < 2'.format(
                datum['image_id'], datum['subject'], datum['action'], datum['subaction']))

        poses2ds = np.array(preds)
        pose3d = np.squeeze(triangulate_poses(cameras, poses2ds, joints_vis))

        # for idx_datum, prediction in enumerate(outputs_cat):
        datum = items[0]['datum']
        # gt_poses = datum['joints_gt']
        # mpjpe = np.mean(np.sqrt(np.sum((prediction - gt_poses) ** 2, axis=1)))

        metric = get_one_grouping_metric(datum, pose3d, results_df)
        mpjpe = metric['mpjpe']
        mpjpes.append(mpjpe)

        logger.info('idx_{:0>6d} sub_{} act_{} subact_{} mpjpe is {}'.format(
            datum['image_id'], datum['subject'], datum['action'], datum['subaction'], mpjpe))
        # logger.info(prediction[0])

        # prediction = rpsm_nn(**input_params)

    logger.info('avg mpjpes on {} val samples is: {}'.format(len(grouping), np.mean(mpjpes)))
    # flag_orient = 'with' if do_bone_vectors else 'without'
    backbone_time = os.path.split(tb_log_dir)[1]
    results_df_save_path = os.path.join(final_output_dir, 'estimate3d_triangulate_{}.csv'.format(backbone_time))
    results_df.to_csv(results_df_save_path)
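
The top-2 view selection above can be written without the nested loops. A minimal NumPy sketch of the same idea, assuming the (njoints, nview) confidence layout the example builds in maxvs_t:

import numpy as np

def top2_view_mask(maxvs_t):
    """maxvs_t: (njoints, nview) confidences -> mask of the two best views per joint."""
    njoints, nview = maxvs_t.shape
    top2_index = np.argsort(maxvs_t, axis=1)[:, ::-1][:, :2]  # descending, keep top 2
    mask = np.zeros((njoints, nview), dtype=bool)
    np.put_along_axis(mask, top2_index, True, axis=1)
    return mask
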
Example #27
0
def validate(config,
             loader,
             dataset,
             model,
             criterion,
             output_dir,
             writer_dict=None):

    model.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()

    if config.DATASET.TEST_DATASET == 'multiview_h36m':
        nviews = 4
    elif config.DATASET.TEST_DATASET in [
            'totalcapture', 'panoptic', 'unrealcv'
    ]:
        nviews = len(config.MULTI_CAMS.SELECTED_CAMS)
    else:
        raise ValueError('dataset {} is not defined'.format(
            config.DATASET.TEST_DATASET))
    nsamples = len(dataset) * nviews
    is_aggre = config.NETWORK.AGGRE
    njoints = config.NETWORK.NUM_JOINTS
    height = int(config.NETWORK.HEATMAP_SIZE[0])
    width = int(config.NETWORK.HEATMAP_SIZE[1])
    all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)

    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, weight, meta) in enumerate(loader):
            raw_features, aggre_features = model(input)
            output = routing(raw_features, aggre_features, is_aggre, meta)

            loss = 0
            target_cuda = []
            for t, w, o in zip(target, weight, output):
                t = t.cuda(non_blocking=True)
                w = w.cuda(non_blocking=True)
                target_cuda.append(t)
                loss += criterion(o, t, w)

            if is_aggre:
                for t, w, r in zip(target, weight, raw_features):
                    t = t.cuda(non_blocking=True)
                    w = w.cuda(non_blocking=True)
                    loss += criterion(r, t, w)
            target = target_cuda

            nimgs = len(input) * input[0].size(0)
            losses.update(loss.item(), nimgs)

            nviews = len(output)
            acc = [None] * nviews
            cnt = [None] * nviews
            pre = [None] * nviews
            for j in range(nviews):
                _, acc[j], cnt[j], pre[j] = accuracy(
                    output[j].detach().cpu().numpy(),
                    target[j].detach().cpu().numpy(),
                    thr=0.083)
            acc = np.mean(acc)
            cnt = np.mean(cnt)
            avg_acc.update(acc, cnt)

            batch_time.update(time.time() - end)
            end = time.time()

            preds = np.zeros((nimgs, njoints, 3), dtype=np.float32)
            heatmaps = np.zeros((nimgs, njoints, height, width),
                                dtype=np.float32)
            for k, o, m in zip(range(nviews), output, meta):
                pred, maxval = get_final_preds(config,
                                               o.clone().cpu().numpy(),
                                               m['center'].numpy(),
                                               m['scale'].numpy())
                pred = pred[:, :, 0:2]
                pred = np.concatenate((pred, maxval), axis=2)
                preds[k::nviews] = pred
                heatmaps[k::nviews] = o.clone().cpu().numpy()

            all_preds[idx:idx + nimgs] = preds
            # all_heatmaps[idx:idx + nimgs] = heatmaps
            idx += nimgs

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time,
                          loss=losses, acc=avg_acc)
                logger.info(msg)

                for k in range(len(input)):
                    view_name = 'view_{}'.format(k + 1)
                    prefix = '{}_{}_{:08}'.format(
                        os.path.join(output_dir, 'validation'), view_name, i)
                    save_debug_images(config, input[k], meta[k], target[k],
                                      pre[k] * 4, output[k], prefix)

        detection_thresholds = [0.075, 0.05, 0.025, 0.0125,
                                6.25e-3]  # 150, 100, 50, 25, 12.5 mm
        perf_indicators = []
        cur_time = time.strftime("%Y-%m-%d-%H-%M", time.gmtime())
        for thresh in detection_thresholds:
            name_value, perf_indicator, per_grouping_detected = dataset.evaluate(
                all_preds, threshold=thresh)
            perf_indicators.append(perf_indicator)
            names = name_value.keys()
            values = name_value.values()
            num_values = len(name_value)
            _, full_arch_name = get_model_name(config)
            logger.info('Detection Threshold set to {} aka {}mm'.format(
                thresh, thresh * 2000.0))
            logger.info('| Arch   ' +
                        '  '.join(['| {: <5}'.format(name)
                                   for name in names]) + ' |')
            logger.info('|--------' * (num_values + 1) + '|')
            logger.info(
                '| ' + '------ ' +
                ' '.join(['| {:.4f}'.format(value)
                          for value in values]) + ' |')
            logger.info('| ' + full_arch_name)
            logger.info('Overall Perf on threshold {} is {}\n'.format(
                thresh, perf_indicator))
            logger.info('\n')
            if per_grouping_detected is not None:
                df = pd.DataFrame(per_grouping_detected)
                save_path = os.path.join(
                    output_dir,
                    'grouping_detec_rate_{}_{}.csv'.format(thresh, cur_time))
                df.to_csv(save_path)

    # report the indicator for the 50 mm threshold (index 2 above)
    return perf_indicators[2]
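
The detection thresholds are fractions of a 2000 mm reference length, which is why the log line above multiplies by 2000.0. A one-liner makes the mapping explicit:

detection_thresholds = [0.075, 0.05, 0.025, 0.0125, 6.25e-3]
print([t * 2000.0 for t in detection_thresholds])  # [150.0, 100.0, 50.0, 25.0, 12.5]
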
Example #28
0
def main():
    args = parse_args()
    update_config(cfg, args)

    if args.prevModelDir and args.modelDir:
        # copy pre models for philly
        copy_prev_models(args.prevModelDir, args.modelDir)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=True)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)
    # logger.info(pprint.pformat(model))

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    dump_input = torch.rand(
        (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
    writer_dict['writer'].add_graph(model, (dump_input, ))

    logger.info(get_model_summary(model, dump_input))

    model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=cfg.TRAIN.SHUFFLE,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)

    best_perf = 0.0
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')

    # if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
    #     logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
    #     checkpoint = torch.load(checkpoint_file)
    #     begin_epoch = checkpoint['epoch']
    #     best_perf = checkpoint['perf']
    #     last_epoch = checkpoint['epoch']
    #     model.load_state_dict(checkpoint['state_dict'])
    #
    #     optimizer.load_state_dict(checkpoint['optimizer'])
    #     logger.info("=> loaded checkpoint '{}' (epoch {})".format(
    #         checkpoint_file, checkpoint['epoch']))

    # checkpoint = torch.load('output/jd/pose_hrnet/crop_face/checkpoint.pth')
    # model.load_state_dict(checkpoint['state_dict'])

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        cfg.TRAIN.LR_STEP,
                                                        cfg.TRAIN.LR_FACTOR,
                                                        last_epoch=last_epoch)

    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)

        # step the LR schedule after this epoch's optimizer updates
        lr_scheduler.step()

        # evaluate on validation set
        # perf_indicator = validate(
        #     cfg, valid_loader, valid_dataset, model, criterion,
        #     final_output_dir, tb_log_dir, writer_dict
        # )
        #
        # if perf_indicator >= best_perf:
        #     best_perf = perf_indicator
        #     best_model = True
        # else:
        #     best_model = False

        # import tqdm
        # import cv2
        # import numpy as np
        # from lib.utils.imutils import im_to_numpy, im_to_torch
        # flip = True
        # full_result = []
        # for i, (inputs,target, target_weight, meta) in enumerate(valid_loader):
        #     with torch.no_grad():
        #         input_var = torch.autograd.Variable(inputs.cuda())
        #         if flip == True:
        #             flip_inputs = inputs.clone()
        #             for i, finp in enumerate(flip_inputs):
        #                 finp = im_to_numpy(finp)
        #                 finp = cv2.flip(finp, 1)
        #                 flip_inputs[i] = im_to_torch(finp)
        #             flip_input_var = torch.autograd.Variable(flip_inputs.cuda())
        #
        #         # compute output
        #         refine_output = model(input_var)
        #         score_map = refine_output.data.cpu()
        #         score_map = score_map.numpy()
        #
        #         if flip == True:
        #             flip_output = model(flip_input_var)
        #             flip_score_map = flip_output.data.cpu()
        #             flip_score_map = flip_score_map.numpy()
        #
        #             for i, fscore in enumerate(flip_score_map):
        #                 fscore = fscore.transpose((1, 2, 0))
        #                 fscore = cv2.flip(fscore, 1)
        #                 fscore = list(fscore.transpose((2, 0, 1)))
        #                 for (q, w) in train_dataset.flip_pairs:
        #                     fscore[q], fscore[w] = fscore[w], fscore[q]
        #                 fscore = np.array(fscore)
        #                 score_map[i] += fscore
        #                 score_map[i] /= 2
        #
        #         # ids = meta['imgID'].numpy()
        #         # det_scores = meta['det_scores']
        #         for b in range(inputs.size(0)):
        #             # details = meta['augmentation_details']
        #             # imgid = meta['imgid'][b]
        #             # print(imgid)
        #             # category = meta['category'][b]
        #             # print(category)
        #             single_result_dict = {}
        #             single_result = []
        #
        #             single_map = score_map[b]
        #             r0 = single_map.copy()
        #             r0 /= 255
        #             r0 += 0.5
        #             v_score = np.zeros(106)
        #             for p in range(106):
        #                 single_map[p] /= np.amax(single_map[p])
        #                 border = 10
        #                 dr = np.zeros((112 + 2 * border, 112 + 2 * border))
        #                 dr[border:-border, border:-border] = single_map[p].copy()
        #                 dr = cv2.GaussianBlur(dr, (7, 7), 0)
        #                 lb = dr.argmax()
        #                 y, x = np.unravel_index(lb, dr.shape)
        #                 dr[y, x] = 0
        #                 lb = dr.argmax()
        #                 py, px = np.unravel_index(lb, dr.shape)
        #                 y -= border
        #                 x -= border
        #                 py -= border + y
        #                 px -= border + x
        #                 ln = (px ** 2 + py ** 2) ** 0.5
        #                 delta = 0.25
        #                 if ln > 1e-3:
        #                     x += delta * px / ln
        #                     y += delta * py / ln
        #                 x = max(0, min(x, 112 - 1))
        #                 y = max(0, min(y, 112 - 1))
        #                 resy = float((4 * y + 2) / 112 * (450))
        #                 resx = float((4 * x + 2) / 112 * (450))
        #                 # resy = float((4 * y + 2) / cfg.data_shape[0] * (450))
        #                 # resx = float((4 * x + 2) / cfg.data_shape[1] * (450))
        #                 v_score[p] = float(r0[p, int(round(y) + 1e-10), int(round(x) + 1e-10)])
        #                 single_result.append(resx)
        #                 single_result.append(resy)
        #             if len(single_result) != 0:
        #                 result = []
        #                 # result.append(imgid)
        #                 j = 0
        #                 while j < len(single_result):
        #                     result.append(float(single_result[j]))
        #                     result.append(float(single_result[j + 1]))
        #                     j += 2
        #                 full_result.append(result)
        model.eval()

        import numpy as np
        from core.inference import get_final_preds
        from utils.transforms import flip_back
        import csv

        num_samples = len(valid_dataset)
        all_preds = np.zeros((num_samples, 106, 3), dtype=np.float32)
        all_boxes = np.zeros((num_samples, 6))
        image_path = []
        filenames = []
        imgnums = []
        idx = 0
        full_result = []
        with torch.no_grad():
            for i, (input, target, target_weight,
                    meta) in enumerate(valid_loader):
                # compute output
                outputs = model(input)
                if isinstance(outputs, list):
                    output = outputs[-1]
                else:
                    output = outputs

                if cfg.TEST.FLIP_TEST:
                    # this part is ugly because PyTorch does not support negative-step indexing
                    # input_flipped = model(input[:, :, :, ::-1])
                    input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                    input_flipped = torch.from_numpy(input_flipped).cuda()
                    outputs_flipped = model(input_flipped)

                    if isinstance(outputs_flipped, list):
                        output_flipped = outputs_flipped[-1]
                    else:
                        output_flipped = outputs_flipped

                    output_flipped = flip_back(output_flipped.cpu().numpy(),
                                               valid_dataset.flip_pairs)
                    output_flipped = torch.from_numpy(
                        output_flipped.copy()).cuda()

                    # feature is not aligned, shift flipped heatmap for higher accuracy
                    if cfg.TEST.SHIFT_HEATMAP:
                        output_flipped[:, :, :, 1:] = \
                            output_flipped.clone()[:, :, :, 0:-1]

                    output = (output + output_flipped) * 0.5

                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)

                loss = criterion(output, target, target_weight)

                num_images = input.size(0)
                # measure accuracy and record loss

                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                # print(c.shape)
                # print(s.shape)
                # print(c[:3, :])
                # print(s[:3, :])
                score = meta['score'].numpy()

                preds, maxvals = get_final_preds(cfg,
                                                 output.clone().cpu().numpy(),
                                                 c, s)

                # print(preds.shape)
                for b in range(input.size(0)):
                    result = []
                    # pic_name=meta['image'][b].split('/')[-1]
                    # result.append(pic_name)
                    for points in range(106):
                        # result.append(str(int(preds[b][points][0])) + ' ' + str(int(preds[b][points][1])))
                        result.append(float(preds[b][points][0]))
                        result.append(float(preds[b][points][1]))

                    full_result.append(result)

                all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                all_preds[idx:idx + num_images, :, 2:3] = maxvals
                # double check this all_boxes parts
                all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                all_boxes[idx:idx + num_images, 5] = score
                image_path.extend(meta['image'])

                idx += num_images

        # with open('res.csv', 'w', newline='') as f:
        #     writer = csv.writer(f)
        #     writer.writerows(full_result)
        gt = []
        with open("/home/sk49/workspace/cy/jd/val.txt") as f:
            for line in f.readlines():
                rows = list(map(float, line.strip().split(' ')[1:]))
                gt.append(rows)

        error = 0
        for i in range(len(gt)):
            error += NME(full_result[i], gt[i])
        print('total NME over validation set:', error)

        log_file = []
        log_file.append(
            [epoch,
             optimizer.state_dict()['param_groups'][0]['lr'], error])

        with open('log_file.csv', 'a', newline='') as f:
            writer1 = csv.writer(f)
            writer1.writerows(log_file)
            # logger.close()

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model.state_dict(),
                'best_state_dict': model.module.state_dict(),
                # 'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            },
            best_model,  # stays False while the validation block above is commented out
            final_output_dir)

    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info(
        '=> saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
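
NME is called above but never defined in this snippet. For 106-point landmarks it is typically the mean point-to-point error divided by a normalizing length; the sketch below is an assumption, with the 450-pixel normalizer borrowed from the commented-out rescaling code rather than confirmed by the source:

import numpy as np

def NME(pred_flat, gt_flat, norm=450.0):
    """Assumed normalized mean error over flat [x0, y0, x1, y1, ...] lists."""
    pred = np.asarray(pred_flat, dtype=np.float32).reshape(-1, 2)
    gt = np.asarray(gt_flat, dtype=np.float32).reshape(-1, 2)
    return np.linalg.norm(pred - gt, axis=1).mean() / norm
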
Example #29
0
def validate(config,
             val_loader,
             val_dataset,
             model,
             criterion,
             output_dir,
             tb_log_dir,
             writer_dict=None):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to evaluate mode
    model.eval()

    num_samples = len(val_dataset)
    all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3),
                         dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))
    image_path = []
    filenames = []
    imgnums = []
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            # compute output
            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            if config.TEST.FLIP_TEST:
                # this part is ugly because PyTorch does not support negative-step indexing
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped)

                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # feature is not aligned, shift flipped heatmap for higher accuracy
                if config.TEST.SHIFT_HEATMAP:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]

                output = (output + output_flipped) * 0.5

            target = target.cuda(non_blocking=True)
            target_weight = target_weight.cuda(non_blocking=True)

            loss = criterion(output, target, target_weight)

            num_images = input.size(0)
            # measure accuracy and record loss
            losses.update(loss.item(), num_images)
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                             target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            preds, maxvals = get_final_preds(config,
                                             output.clone().cpu().numpy(), c,
                                             s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])

            idx += num_images

            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          loss=losses, acc=acc)
                logger.info(msg)

                prefix = '{}_{}'.format(os.path.join(output_dir, 'val'), i)
                save_debug_images(config, input, meta, target, pred * 4,
                                  output, prefix)

        name_values, perf_indicator = val_dataset.evaluate(
            config, all_preds, output_dir, all_boxes, image_path, filenames,
            imgnums)

        model_name = config.MODEL.NAME
        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(name_value, model_name)
        else:
            _print_name_value(name_values, model_name)

        if writer_dict:
            writer = writer_dict['writer']
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalar('valid_loss', losses.avg, global_steps)
            writer.add_scalar('valid_acc', acc.avg, global_steps)
            if isinstance(name_values, list):
                for name_value in name_values:
                    writer.add_scalars('valid', dict(name_value), global_steps)
            else:
                writer.add_scalars('valid', dict(name_values), global_steps)
            writer_dict['valid_global_steps'] = global_steps + 1

    return perf_indicator
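
flip_back is imported but not shown in these snippets. Below is a minimal sketch consistent with how it is called here: mirror the heatmaps horizontally, then swap the symmetric joint channels listed in flip_pairs. The project's own implementation may differ in details:

import numpy as np

def flip_back_sketch(output_flipped, matched_parts):
    """output_flipped: (batch, njoints, height, width) heatmaps from a flipped input."""
    out = output_flipped[:, :, :, ::-1].copy()  # undo the horizontal mirror
    for left, right in matched_parts:           # swap left/right joint channels
        out[:, [left, right]] = out[:, [right, left]]
    return out
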
Example #30
0
def main():
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=False)

    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=config.LOSS.USE_TARGET_WEIGHT).cuda()

    ## Load an image
    if args.input_image:
        image_file = args.input_image
    else:
        image_file = '/home/bh/Downloads/g.jpg'
    data_numpy = cv2.imread(image_file,
                            cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    if data_numpy is None:
        logger.error('=> fail to read {}'.format(image_file))
        raise ValueError('Fail to read {}'.format(image_file))

    # object detection box
    box = [0, 0, 320, 320]
    c, s = _box2cs(box, data_numpy.shape[0], data_numpy.shape[1])
    r = 0

    #trans = get_affine_transform(c, s, r, config.MODEL.IMAGE_SIZE)
    #print('transform: {}'.format(trans))
    #input = cv2.warpAffine(
    #data_numpy,
    #trans,
    #(int(config.MODEL.IMAGE_SIZE[0]), int(config.MODEL.IMAGE_SIZE[1])),
    #flags=cv2.INTER_LINEAR)
    input = data_numpy

    # vis transformed image
    #cv2.imshow('image', input)
    #cv2.waitKey(3000)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    input = transform(input).unsqueeze(0)

    if args.threshold is not None:
        threshold = args.threshold
    else:
        threshold = 0.5
    print('threshold: {}'.format(threshold))
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        # compute output heatmap
        output = model(input)
        # compute coordinate
        preds, maxvals = get_final_preds(config,
                                         output.clone().cpu().numpy(),
                                         np.asarray([c]), np.asarray([s]))
        print('pred: {} maxval: {}'.format(preds, maxvals))
        # plot
        image = data_numpy.copy()
        for i in range(preds[0].shape[0]):
            mat = preds[0][i]
            val = maxvals[0][i]
            x, y = int(mat[0]), int(mat[1])
            if val > threshold:
                # scale predictions up by 4 to draw on the full-resolution image
                cv2.circle(image, (x * 4, y * 4), 2, (255, 0, 0), 2)

        # vis result
        #cv2.imshow('res', image)
        #cv2.waitKey(0)

        cv2.imwrite('output.jpg', image)
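
_box2cs is not shown in this example. Under the 200-pixel scale convention these snippets use elsewhere (note the np.prod(s * 200, 1) products), a plausible sketch that turns an [x, y, w, h] box into a center/scale pair follows; the aspect-ratio handling and the 1.25 margin are assumptions, not taken from the source:

import numpy as np

def _box2cs_sketch(box, image_width, image_height, pixel_std=200.0):
    """Assumed helper: [x, y, w, h] box -> (center, scale in pixel_std units)."""
    x, y, w, h = box[:4]
    center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
    aspect_ratio = image_width * 1.0 / image_height
    # pad the box to the target aspect ratio before converting to scale
    if w > aspect_ratio * h:
        h = w * 1.0 / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio
    scale = np.array([w / pixel_std, h / pixel_std], dtype=np.float32) * 1.25
    return center, scale
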