Example #1
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)
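    # Visualize the predictions: read test image paths from the crabs CSV and draw each predicted landmark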
    with open('../data/crabs/crabs_data_test.csv', 'r') as f:
        data = np.loadtxt(f, str, delimiter=",", skiprows=1)
    paths = data[:, 0]
    for index, path in enumerate(paths):
        img = cv2.imread("../data/crabs/images/{}".format(path))
        a = predictions[index]
        b = a.numpy()

        for px in b:
            # print(tuple(px))
            # predicted coordinates may be floats; OpenCV expects integer pixel positions
            cv2.circle(img, tuple(map(int, px)), 1, (0, 0, 255), 3, 8, 0)

        # cv2.imwrite("/home/njtech/Jiannan/crabs/dataset/result_new/{}".format(path.split('/')[-1]), img)

        cv2.imshow("img", img)

        cv2.waitKey(1000) & 0xFF

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #2
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)
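    # If args.onnx_export is set, export the network to ONNX and return without running inference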
    if args.onnx_export:
        torch_out = torch.onnx._export(model,
                                       torch.rand(1, 3, *config.IMAGE_SIZE),  # assumes IMAGE_SIZE is an (H, W) pair
                                       osp.join(final_output_dir,
                                                args.onnx_export),
                                       export_params=True)
        return

    gpus = list(config.GPUS)
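    # A leading GPU id of -1 is treated as CPU-only mode: skip DataParallel and map the checkpoint to the CPU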
    if gpus[0] > -1:
        model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    if gpus[0] > -1:
        state_dict = torch.load(args.model_file)
    else:
        state_dict = torch.load(args.model_file, map_location='cpu')
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        if gpus[0] > -1:
            model.module.load_state_dict(state_dict)
        else:
            model.load_state_dict(state_dict)

    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=False)

    test_loader = DataLoader(dataset=dataset,
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    # ipdb.set_trace()  # debugging breakpoint left in; disabled so inference runs unattended
    nme, predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #3
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    # model = models.get_face_alignment_net(config)
    model = eval('models.' + config.MODEL.NAME + '.get_face_alignment_net')(
        config, is_train=True)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    # state_dict = torch.load(args.model_file)
    # if 'state_dict' in state_dict.keys():
    #     state_dict = state_dict['state_dict']
    #     model.load_state_dict(state_dict)
    # else:
    #     model.module.load_state_dict(state_dict)

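    # Load weights either from the checkpoint passed on the command line or from final_state.pth in the output directory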
    if args.model_file:
        logger.info('=> loading model from {}'.format(args.model_file))
        # model.load_state_dict(torch.load(args.model_file), strict=False)

        model_state = torch.load(args.model_file)
        model.module.load_state_dict(model_state.state_dict())
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model_state = torch.load(model_state_file)
        model.module.load_state_dict(model_state)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #4
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    #state_dict = torch.load(args.model_file)
    #if 'state_dict' in state_dict.keys():
    #    state_dict = state_dict['state_dict']
    #    model.load_state_dict(state_dict)
    #else:
    #    model.module.load_state_dict(state_dict)

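    # This example assumes the checkpoint stores a complete serialized model object rather than a state_dict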
    model = torch.load(args.model_file)
    model.eval()
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)

    import cv2
    img = cv2.imread('data/wflw/images/my3.jpg')
    print(predictions, predictions.shape)
    for item in predictions[0]:
        cv2.circle(img, (int(item[0]), int(item[1])), 3, (0, 0, 255), -1)  # cast to integer pixel coordinates
    cv2.imwrite('out.png', img)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #5
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()

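    # Use a torchvision ResNet-101 backbone with its final fully connected layer resized to the configured output size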
    model = torchvision.models.resnet101(pretrained=config.MODEL.PRETRAINED,
                                         progress=True)
    num_ftrs = model.fc.in_features
    model.fc = torch.nn.Linear(num_ftrs, config.MODEL.OUTPUT_SIZE[0])

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #6
def main():
    #
    args = parse_args()
    #
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')
    #
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))
    #
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED
    #
    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)
    #
    gpus = list(config.GPUS)
    #
    # # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.load_state_dict(state_dict)  # the model is not wrapped in DataParallel yet, so load into it directly
    model = nn.DataParallel(model, device_ids=gpus).cuda()
    #
    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)
    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
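    # Manually recompute a normalized error: rescale predictions to image coordinates (the factor 16 appears to undo
    # the heatmap downsampling) and normalize each distance by the spacing of the first two ground-truth points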
    target = test_loader.dataset.load_all_pts()
    pred = 16 * predictions
    num_samples = len(pred)
    res = 0.0
    res_tmp = np.zeros(config.MODEL.NUM_JOINTS)
    diff = target - pred
    res_temp_x = diff[:, :, 0]
    res_temp_y = diff[:, :, 1]

    # csv_file_test_x = pd.DataFrame(np.transpose(np.array(pred[:, :, 0])), columns=test_loader.dataset.annotation_files)
    # csv_file_test_y = pd.DataFrame(np.transpose(np.array(pred[:, :, 1])), columns=test_loader.dataset.annotation_files)
    # csv_file_target_x = pd.DataFrame(np.transpose(np.array(target[:, :, 0])), columns=test_loader.dataset.annotation_files)
    # csv_file_target_y = pd.DataFrame(np.transpose(np.array(target[:, :, 1])), columns=test_loader.dataset.annotation_files)

    for i in range(num_samples):
        trans = np.sqrt(
            pow(target[i][0][0] - target[i][1][0], 2) +
            pow(target[i][0][1] - target[i][1][1], 2)) / 30.0
        res_temp_x[i] = res_temp_x[i] / trans
        res_temp_y[i] = res_temp_y[i] / trans
        for j in range(len(target[i])):
            dist = np.sqrt(
                np.power((target[i][j][0] - pred[i][j][0]), 2) +
                np.power((target[i][j][1] - pred[i][j][1]), 2)) / trans
            res += dist
            res_tmp[j] += dist
    res_t = np.sqrt(res_temp_x * res_temp_x + res_temp_y * res_temp_y)
    # pd.DataFrame(data=res_temp_x.data.value).to_csv('res_x')
    # pd.DataFrame(data=res_temp_y.data.value).to_csv('res_y')
    # pd.DataFrame(data=res_t.data.value).to_csv('res_t')
    res_tmp /= float(len(pred))  # np.float was removed in recent NumPy versions
    print(res_tmp)
    print(np.mean(res_tmp))
    res /= (len(pred) * len(pred[0]))
    print(res)
Example #7
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn.benchmark = config.CUDNN.BENCHMARK
    # cudnn.deterministic = config.CUDNN.DETERMINISTIC
    # cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    # model = nn.DataParallel(model, device_ids=gpus).cuda()
    model.to("cuda")
    # print(model)
    # load model
    # state_dict = torch.load(args.model_file)
    # print(state_dict)
    # model = torch.load(args.model_file)
    with open(args.model_file, "rb") as fp:
        state_dict = torch.load(fp)
        model.load_state_dict(state_dict)
    # model.load_state_dict(state_dict['state_dict'])
    # if 'state_dict' in state_dict.keys():
    #     state_dict = state_dict['state_dict']
    #     # print(state_dict)
    #     model.load_state_dict(state_dict)
    # else:
    #     model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(
        dataset=dataset_type(config,
                             is_train=False),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY
    )

    predictions = function.inference(config, test_loader, model)
    # print("len(predictions)", len(predictions))
    # print(predictions[0])
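    # Flatten each prediction (file name plus 194 landmark x/y coordinates) into one row and write a CSV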
    df_predictions = []
    for pred in predictions:
        row = dict()
        row['file_name'] = pred[0]
        for id_point in range(194):
            row[f'Point_M{id_point}_X'] = int(pred[1][id_point])
            row[f'Point_M{id_point}_Y'] = int(pred[2][id_point])
        df_predictions.append(row)
    df_predictions = pd.DataFrame(df_predictions)
    # print(predictions_meta[0])
    df_predictions.to_csv('pred_test.csv', index=False)
Example #8
def main():

    args = get_args()
    global best_dsc
    #cnn
    with procedure('init model'):
        model = get_model(config)
        model = torch.nn.parallel.DataParallel(model.cuda())

    with procedure('loss and optimizer'):
        criterion = FocalLoss(config.TRAIN.LOSS.GAMMA,
                              config.DATASET.ALPHA).cuda()
        optimizer = optim.Adam(model.parameters(),
                               lr=config.TRAIN.LR,
                               weight_decay=config.TRAIN.LR)  # NOTE: weight decay reuses the learning rate; a dedicated weight-decay setting is probably intended
    start_epoch = 0

    if config.TRAIN.RESUME:
        with procedure('resume model'):
            start_epoch, best_acc, model, optimizer = load_model(
                model, optimizer)

    cudnn.benchmark = True
    #normalization
    normalize = transforms.Normalize(mean=config.DATASET.MEAN,
                                     std=config.DATASET.STD)
    trans = transforms.Compose([transforms.ToTensor(), normalize])

    with procedure('prepare dataset'):
        #load data
        data_split = config.DATASET.SPLIT
        with open(data_split) as f:
            data = json.load(f)

        train_dset = MILdataset(data['train_neg'] + data['train_pos'], trans)
        train_loader = DataLoader(train_dset,
                                  batch_size=config.TRAIN.BATCHSIZE,
                                  shuffle=False,
                                  num_workers=config.WORKERS,
                                  pin_memory=True)
        if config.TRAIN.VAL:
            val_dset = MILdataset(data['val_pos'] + data['val_neg'], trans)
            val_loader = DataLoader(val_dset,
                                    batch_size=config.TEST.BATCHSIZE,
                                    shuffle=False,
                                    num_workers=config.WORKERS,
                                    pin_memory=True)

    with procedure('init tensorboardX'):
        train_log_path = os.path.join(
            config.TRAIN.OUTPUT,
            time.strftime('%Y%m%d_%H%M%S', time.localtime()))
        if not os.path.isdir(train_log_path):
            os.makedirs(train_log_path)
        tensorboard_path = os.path.join(train_log_path, 'tensorboard')
        with open(os.path.join(train_log_path, 'cfg.yaml'), 'w') as f:
            print(config, file=f)
        if not os.path.isdir(tensorboard_path):
            os.makedirs(tensorboard_path)
        summary = TensorboardSummary(tensorboard_path)
        writer = summary.create_writer()

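    # MIL training loop: rank patches at every scale, train on the selected top-k patches, then periodically validate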
    for epoch in range(start_epoch, config.TRAIN.EPOCHS):
        index = []
        for idx, each_scale in enumerate(config.DATASET.MULTISCALE):
            train_dset.setmode(idx)
            #print(len(train_loader), len(train_dset))
            probs = inference(epoch, train_loader, model)
            topk = group_argtopk(train_dset.ms_slideIDX[:], probs,
                                 train_dset.targets[:],
                                 train_dset.ms_slideLen[:], each_scale)
            index.extend([[each[0], each[1]]
                          for each in zip(topk, [idx] * len(topk))])
        train_dset.maketraindata(index)
        train_dset.shuffletraindata()
        train_dset.setmode(-1)
        loss = trainer(epoch, train_loader, model, criterion, optimizer,
                       writer)
        cp('(#r)Training(#)\t(#b)Epoch: [{}/{}](#)\t(#g)Loss:{}(#)'.format(
            epoch + 1, config.TRAIN.EPOCHS, loss))

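        # Periodic validation: run patch-level inference at every scale, rebuild per-image masks in parallel, and track the best mean DSC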
        if config.TRAIN.VAL and (epoch + 1) % config.TRAIN.VALGAP == 0:
            patch_info = {}
            for idx, each_scale in enumerate(config.DATASET.MULTISCALE):
                val_dset.setmode(idx)
                probs, img_idxs, rows, cols = inference_vt(
                    epoch, val_loader, model)
                res = probs_parser(probs, img_idxs, rows, cols, val_dset,
                                   each_scale)

                for key, val in res.items():
                    if key not in patch_info:
                        patch_info[key] = val
                    else:
                        patch_info[key].extend(val)
            res = []
            dsc = []
            with multiprocessing.Pool(processes=16) as pool:
                for each_img, each_labels in patch_info.items():
                    res.append(
                        pool.apply(get_mask,
                                   (each_img, each_labels, None, False)))
            pool.join()
            for each_res in res:
                dsc.extend([each_val for each_val in each_res.values()])

            dsc = np.array(dsc).mean()
            '''
            maxs = group_max(np.array(val_dset.slideLen), probs, len(val_dset.targets), config.DATASET.MULTISCALE[-1])
            threshold = 0.5
            pred = [1 if x >= threshold else 0 for x in maxs]
            err, fpr, fnr, f1 = calc_err(pred, val_dset.targets)

            cp('(#y)Vaildation\t(#)(#b)Epoch: [{}/{}]\t(#)(#g)Error: {}\tFPR: {}\tFNR: {}\tF1: {}(#)'.format(epoch+1, config.TRAIN.EPOCHS, err, fpr, fnr, f1))
            '''
            cp('(#y)Validation\t(#)(#b)Epoch: [{}/{}]\t(#)(#g)DSC: {}(#)'.
               format(epoch + 1, config.TRAIN.EPOCHS, dsc))
            writer.add_scalar('Val/dsc', dsc, epoch)
            if dsc >= best_dsc:
                best_dsc = dsc
                obj = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_dsc': best_dsc,
                    'optimizer': optimizer.state_dict()
                }
                torch.save(obj,
                           os.path.join(train_log_path, 'BestCheckpoint.pth'))