        # flip the valid image region of the heatmaps back along the width axis (horizontal-flip TTA)
        a = np.zeros_like(hm_pred)
        a[:, :, :img_w2 // config.hm_stride] = np.flip(
            hm_pred[:, :, :img_w2 // config.hm_stride], 2)
        # swap conjugate (left/right) keypoint channels so they match the flipped image
        for conj in config.conjug:
            a[conj] = a[conj[::-1]]
        hm_pred = a
    return hm_pred

    # x, y = encoder.decode_np(hm_pred, scale, config.hm_stride, method='maxoffset')
    # keypoints = np.stack([x, y, np.ones(x.shape)], axis=1).astype(np.int16)
    # return keypoints
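
# Hedged sketch (an assumption; the caller of this helper is truncated in this
# snippet): with horizontal-flip test-time augmentation, the heatmaps mapped back
# to the original orientation above are usually averaged with the plain forward
# pass before decoding keypoints.
def average_flip_tta(hm_plain, hm_flipped_back):
    # both inputs are (C, H, W) heatmaps in the un-flipped coordinate frame;
    # hm_flipped_back is the output of the flip-and-swap block above
    return 0.5 * (hm_plain + hm_flipped_back)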


if __name__ == '__main__':
    config = Config('trousers')
    n_gpu = pytorch_utils.setgpu('3')
    test_kpda = KPDA(config, db_path, 'test')
    print('Test sample number: %d' % test_kpda.size())
    df = data_frame_template()
    net = CascadePyramidNet(config)
    # load the checkpoint before moving the net to CUDA
    checkpoint = torch.load(
        root_path + 'checkpoints/trousers_027_posneu_lgtrain.ckpt')
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)
    net.eval()
    encoder = KeypointEncoder()
    for idx in tqdm(range(test_kpda.size())):
        img_path = test_kpda.get_image_path(idx)
Example #2
            bboxes2, scores2 = encoder.decode(
                reg_target, cls_target,
                [config.img_max_size, config.img_max_size])
            img = np.transpose(img.numpy(), (1, 2, 0))  # CHW -> HWC
            img = ((img * config.sigma + config.mu) * 255).astype(np.uint8)  # undo normalization back to 0-255
            draw_bbox(
                img, bboxes.numpy(), scores.numpy(),
                '/home/storage/lsy/fashion/predictions/' + config.clothes +
                '/%d-%d.png' % (i, j), bboxes2.numpy())
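

# Hedged sketch (an assumption; the project's real draw_bbox is not shown in this
# snippet): a helper with this call signature typically rasterizes the predicted
# boxes with their scores, draws the reference boxes in a second colour, and saves
# the result. The (x1, y1, x2, y2) box layout is assumed.
def draw_bbox_sketch(img, bboxes, scores, out_path, bboxes2=None):
    import cv2  # assumed available in this module
    canvas = img.copy()
    for (x1, y1, x2, y2), score in zip(bboxes.astype(int), scores):
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(canvas, '%.2f' % score, (x1, max(y1 - 4, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    if bboxes2 is not None:
        for x1, y1, x2, y2 in bboxes2.astype(int):
            cv2.rectangle(canvas, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
    cv2.imwrite(out_path, canvas)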


if __name__ == '__main__':
    batch_size = 24
    workers = 16
    config = Config('outwear')
    n_gpu = pytorch_utils.setgpu('all')
    test_kpda = KPDA(config, db_path, 'val')
    print('Val sample number: %d' % test_kpda.size())

    net = RetinaNet(config, num_classes=2)
    # load the checkpoint before moving the net to CUDA
    checkpoint = torch.load(root_path + 'checkpoints/rpn_053.ckpt')
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)

    test_dataset = DataGenerator(config, test_kpda, phase='test')
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False,
Example #3
    :param output: n x 14 matrix of predicted per-class probabilities
    :param target: n x 14 matrix of binary ground-truth labels
    :return: list of 14 per-class AUC values
    """
    aucs = []
    num_classes = output.shape[1]
    for i in range(num_classes):
        aucs.append(roc_auc_score(target[:, i], output[:, i]))
    return aucs
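

# Hedged usage sketch (illustrative only; the helper and arrays below are synthetic
# and not part of the original pipeline): compute_AUCs expects per-class probability
# scores and binary targets of matching shape, and returns one AUC per class.
def _auc_usage_example():
    import numpy as np  # imported locally to keep the sketch self-contained
    dummy_output = np.random.rand(8, 14)          # predicted probabilities
    dummy_target = np.zeros((8, 14), dtype=int)   # binary ground-truth labels
    dummy_target[::2] = 1                         # ensure both classes occur in every column
    per_class_aucs = compute_AUCs(dummy_output, dummy_target)
    assert len(per_class_aucs) == dummy_target.shape[1]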


if __name__ == '__main__':
    config = Config()
    resume = False
    workers = config.test_workers
    n_gpu = pytorch_utils.setgpu(config.test_gpus)
    batch_size = config.test_batch_size_per_gpu * n_gpu

    test_data = CXR2(config.data_dir, 'test')
    print('Test sample number: %d' % test_data.size())

    net = DenseNet121(num_classes=len(test_data.labels))
    # load the checkpoint before moving the net to CUDA
    checkpoint = torch.load(os.path.join(config.proj_dir, 'checkpoints', 'densenet_024.ckpt'))
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)

    test_generator = DataGenerator(config, test_data, phase='test')
    test_loader = DataLoader(test_generator,
                             batch_size=batch_size,
Example #4
        loss_lst.append(loss_output.item())
        target_lst.append(target.data.cpu().numpy())
        output_lst.append(output.data.cpu().numpy())
    target_lst = np.concatenate(target_lst, axis=0)
    output_lst = np.concatenate(output_lst, axis=0)
    aucs = compute_AUCs(output_lst, target_lst)
    end_time = time.time()
    metrics = [np.mean(loss_lst), aucs]
    return metrics, end_time - start_time


if __name__ == '__main__':
    config = Config()
    resume = False
    workers = config.train_workers
    n_gpu = pytorch_utils.setgpu(config.train_gpus)
    batch_size = config.train_batch_size_per_gpu * n_gpu
    epochs = config.epochs
    base_lr = config.base_lr
    save_dir = config.proj_dir + 'checkpoints/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    train_data = CXR2(config.data_dir, 'train')
    val_data = CXR2(config.data_dir, 'val')
    print('Train sample number: %d' % train_data.size())
    print('Val sample number: %d' % val_data.size())
    pos = train_data.get_occurrences()
    neg = [train_data.size() - x for x in pos]

    net = DenseNet121(num_classes=len(train_data.labels))
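
    # Hedged sketch (an assumption; the original script is truncated before the loss
    # is built, and torch is assumed to be imported at the top of the file): the
    # pos/neg occurrence counts above are commonly turned into a per-class pos_weight
    # for BCEWithLogitsLoss to offset label imbalance.
    pos_weight = torch.tensor([n / max(p, 1) for p, n in zip(pos, neg)],
                              dtype=torch.float32)
    weighted_bce = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)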
Example #5
                hp = print_heatmap(hp, img, config)
                hp2 = print_heatmap(hp2, img, config)
                hps.append(np.concatenate([hp, hp2], axis=0))
            cv2.imwrite('/home/storage/lsy/fashion/tmp/%d-%d-hm.png' % (i, j), np.concatenate(hps, axis=1))
            cv2.imwrite('/home/storage/lsy/fashion/tmp/%d-%d-kp.png' % (i, j), kp_img)
    print(np.nanmean(np.array(nes)))





if __name__ == '__main__':
    batch_size = 32
    workers = 16
    config = Config('outwear')
    n_gpu = pytorch_utils.setgpu('0,1')
    test_kpda = KPDA(config, db_path, 'val')
    print('Val sample number: %d' % test_kpda.size())

    net = CascadePyramidNet(config)  # alternatives: UNet(config), VGG_FCN(config, layer_num=8), ResidualUNet2D(config)
    # load the checkpoint before moving the net to CUDA
    checkpoint = torch.load(root_path + 'checkpoints/outwear_063_cascade.ckpt')
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    cudnn.benchmark = True
    net = DataParallel(net)

    test_dataset = DataGenerator(config, test_kpda, phase='test')
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=workers,
Example #6
        # Variable(..., volatile=True) and cuda(async=True) are deprecated, and
        # `async` is a reserved word in Python 3.7+; evaluation is assumed to run
        # under torch.no_grad().
        lbls = lbls.cuda(non_blocking=True).squeeze()
        outputs, _ = net(imgs)
        loss_output = loss(outputs, lbls)
        pred = outputs.data.max(1, keepdim=True)[1]
        correct += pred.eq(lbls.data.view_as(pred)).long().cpu().sum()
        total += lbls.size(0)
        metrics.append(loss_output.item())
    end_time = time.time()
    metrics = np.asarray(metrics, np.float32)
    return metrics, float(correct) / total, end_time - start_time
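
# Hedged usage sketch (the enclosing function's def line is truncated above, so the
# name `evaluate` and the loader/criterion names here are hypothetical): the routine
# is normally called with gradients disabled.
# with torch.no_grad():
#     val_metrics, val_acc, elapsed = evaluate(val_loader, net, loss)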


if __name__ == '__main__':
    batch_size = 64
    workers = 32
    n_gpu = pytorch_utils.setgpu('5,6')
    epochs = 100
    base_lr = 1e-3
    save_dir = root_path + 'checkpoints/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    resume = False

    config = Config()
    net = ResNet_CAM()

    start_epoch = 1
    lr = base_lr
    best_val_loss = float('inf')
    log_mode = 'w'
    if resume:
Example #7
    metrics = np.asarray(metrics, np.float32)
    return metrics, end_time - start_time


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c',
                        '--clothes',
                        help='clothes type to train, e.g. outwear or trousers',
                        default='outwear')
    args = parser.parse_args(sys.argv[1:])
    print('Training ' + args.clothes)

    batch_size = 24
    workers = 10
    n_gpu = pytorch_utils.setgpu('4,5,6,7')
    epochs = 1000
    # 256 pixels: SGD L1 loss starts from 1e-2, L2 loss starts from 1e-3
    # 512 pixels: SGD L1 loss starts from 1e-3, L2 loss starts from 1e-4
    base_lr = 1e-3
    save_dir = root_path + 'checkpoints/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    resume = False

    config = Config(args.clothes)
    net = CascadePyramidNetV8(config)
    loss = VisErrorLoss()
    train_data = KPDA(config, db_path, 'train')
    val_data = KPDA(config, db_path, 'val')
    print('Train sample number: %d' % train_data.size())