def run_inference(model_path, test_image_folder, result_folder):
    model = TinyNet()
    model.load_state_dict(torch.load(model_path, map_location='cpu'))  # inference runs on CPU below
    model = model.float()
    model.eval()  # disable dropout/batchnorm training behavior

    test_folder_pattern = os.path.join(test_image_folder, '*')
    file_paths = sorted(glob(test_folder_pattern))

    if not os.path.exists(result_folder):
        os.makedirs(result_folder)

    for path in file_paths:
        img = Image.open(path).convert('RGBA').convert('RGB')
        filename = os.path.basename(path)  # portable across OSes
        logger.debug(f'Processing: {filename}')

        x = test_transform(img)
        x = x.unsqueeze(0)  # add a batch dimension
        with torch.no_grad():  # inference only; no gradient tracking needed
            logits, bbox_pred = model(x)
        label = logits[0].cpu().numpy().argmax()
        text = 'visa' if label == 1 else 'mastercard'

        bbox_pred = bbox_pred[0].cpu().numpy()
        img_bbox = utils.get_image_with_bbox_and_text(img, bbox_pred, text)

        result_path = os.path.join(result_folder, filename)
        img_bbox.save(result_path)
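Every example on this page assumes an externally defined `test_transform`. Its real definition is not shown anywhere in the listing; the sketch below is a hypothetical reconstruction with torchvision, based on the shape comment in Example #6, and the resize size and normalization constants are illustrative. Note that the later examples instead call `test_transform(**augmentation_params)` as a factory that returns such a callable.

import numpy as np
from PIL import Image
from torchvision import transforms

def test_transform(img, size=224):
    """Hypothetical stand-in: map an (H, W, 3) image to a (3, size, size) tensor."""
    if isinstance(img, np.ndarray):  # Examples #6 and #7 pass numpy arrays
        img = Image.fromarray(img)
    pipeline = transforms.Compose([
        transforms.Resize((size, size)),
        transforms.ToTensor(),  # HWC uint8 -> CHW float in [0, 1]
        transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet statistics,
                             std=[0.229, 0.224, 0.225]),  # assumed here
    ])
    return pipeline(img)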
Example #2
def main():
    args = parse_args()
    config = get_config(args.config)
    paths = get_config(args.paths)
    params = config['train_params']
    model_name = params['model']
    model = pydoc.locate(model_name)(**params['model_params'])
    model.load_state_dict(torch.load(params['weights'])['state_dict'])
    paths = paths['data']

    dataset = TestDataset(image_dir=Path(paths['path']) /
                          Path(paths['test_images']),
                          ids=None,
                          transform=test_transform(
                              **config['data_params']['augmentation_params']))

    loader = DataLoader(dataset=dataset,
                        batch_size=1,
                        shuffle=False,
                        drop_last=False,
                        num_workers=16,
                        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)

    for pred, name in tqdm(zip(inferencer.predict(model, loader), dataset.ids),
                           total=len(dataset)):
        np.savez(
            Path(paths['path']) / Path(paths['predictions_path']) /
            f'{name}.npz', pred)
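`PytorchInference` is another helper these examples import but never define. Below is a minimal sketch of the interface the call sites imply: one numpy prediction yielded per sample, an optional softmax `activation` (Example #8), and a `tta` argument whose handling is omitted. All details are assumptions, not the helper's actual implementation.

import torch

class PytorchInference:
    """Hypothetical reconstruction of the inference helper used in these examples."""
    def __init__(self, device, activation=None):
        self.device = device
        self.activation = activation

    def predict(self, model, loader, tta=None):
        model = model.to(self.device).eval()
        for batch in loader:
            images = batch['image'] if isinstance(batch, dict) else batch
            with torch.no_grad():  # inference only; grad mode restored before each yield
                out = model(images.to(self.device))
                if self.activation == 'softmax':
                    out = torch.softmax(out, dim=1)
            for sample in out.cpu().numpy():  # yield per-sample arrays
                yield sample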
Example #3
def main():
    args = parse_args()
    config = get_config(args.config)
    paths = get_config(args.paths)
    params = config['train_params']
    model_name = params['model']
    model = pydoc.locate(model_name)(**params['model_params'])
    model.load_state_dict(torch.load(params['weights'])['state_dict'])
    paths = paths['data']

    dataset = TestDataset(image_dir=Path(paths['path']) /
                          Path(paths['test_images']),
                          transform=test_transform(
                              **config['data_params']['augmentation_params']))

    loader = DataLoader(dataset=dataset,
                        batch_size=16,
                        shuffle=False,
                        drop_last=False,
                        num_workers=16,
                        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)
    ids = dataset.ids
    predictions = []
    for i, pred in tqdm(enumerate(inferencer.predict(model, loader)),
                        total=len(dataset)):
        predictions.append([ids[i], np.argmax(pred)])  # predicted class index

    predictions = pd.DataFrame(predictions, columns=['fname', 'preds'])
    predictions.to_csv('preds_test.csv', index=False)  # keep only fname/preds columns
Example #4
    def make_transform(self, stage, is_train=False):
        if is_train:
            if stage['augmentation'] == 'mix_transform':
                transform = mix_transform(**self.params['augmentation_params'])
            else:
                raise KeyError('augmentation not found')
        else:
            transform = test_transform(**self.params['augmentation_params'])
        return transform
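A hypothetical call site for `make_transform`, assuming stages are plain dicts read from the config and `factory` names an instance of the enclosing class:

train_stage = {'augmentation': 'mix_transform'}  # the only value accepted here
train_tf = factory.make_transform(train_stage, is_train=True)
test_tf = factory.make_transform({}, is_train=False)  # stage is ignored at test time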
Example #5
def main():
    args = parse_args()
    config = get_config(args.config)
    paths = get_config(args.paths)
    params = config['train_params']
    model_name = params['model']
    model = pydoc.locate(model_name)(**params['model_params'])
    model.load_state_dict(torch.load(params['weights'])['state_dict'])
    paths = paths['data']

    dataset = TestDataset(
        path=Path(paths['path']),
        image_csv=pd.read_csv(os.path.join(paths['path'], paths['test_images'])),
        transform=test_transform(**config['data_params']['augmentation_params']))

    loader = DataLoader(
        dataset=dataset,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        num_workers=16,
        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)
    torch.set_num_threads(20)
    test_csv = pd.read_csv(os.path.join(paths['path'], paths['test_images']))

    # Segmentation pass: threshold each predicted mask and encode it as RLE.
    for i, prediction in enumerate(
            tqdm(inferencer.predict(model, loader), total=len(dataset))):
        prediction = np.squeeze(prediction).T
        prediction = torch.sigmoid(torch.from_numpy(prediction))
        prediction = (prediction > 0.65).float().numpy()  # binarize at threshold 0.65
        mask = mask_to_rle(
            cv2.resize(prediction, dsize=(1024, 1024),
                       interpolation=cv2.INTER_NEAREST), 1024, 1024)
        if test_csv.loc[i, "EncodedPixels"] != str(-1):
            test_csv.loc[i, "EncodedPixels"] = mask

    test_csv.to_csv(os.path.join(paths['path'], "../stage_2_sample_submission.csv"), index=False)
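`mask_to_rle` is not defined in this example. A minimal sketch, assuming a Kaggle-style run-length encoding over the column-major flattened mask ('start length' pairs, '-1' for an empty mask); the original helper's exact format may differ.

import numpy as np

def mask_to_rle(mask, width, height):
    """Hypothetical stand-in: binary (height, width) mask -> RLE string."""
    pixels = mask.reshape(height, width).T.flatten()   # column-major order
    pixels = np.concatenate([[0], pixels, [0]])        # pad so every run closes
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed run boundaries
    runs[1::2] -= runs[::2]                            # convert run ends to lengths
    return ' '.join(map(str, runs)) if len(runs) else '-1'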
Example #6
    def detect_batch(self, imgs, path_list=None):
        '''
        imgs: list of numpy images; the images may have different shapes.
        '''
        if path_list is None:
            path_list = [None] * len(imgs)

        img_list = [test_transform(img) for img in imgs]
        # test_transform: (height, width, 3) -> (3, height_resized, width_resized)
        img_wrap = torch.stack(img_list, 0)
        meta_list = [{
            'height': img.shape[0],
            'width': img.shape[1],
            'path': p
        } for img, p in zip(imgs, path_list)]

        return self._detect_batch(img_wrap, meta_list)
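A hypothetical call site, assuming `detector` is an instance of the class above and the images are read with OpenCV:

import cv2

paths = ['a.jpg', 'b.jpg']
# The inputs may have different shapes; test_transform resizes each image
# before detect_batch stacks them into a single batch tensor.
imgs = [cv2.cvtColor(cv2.imread(p), cv2.COLOR_BGR2RGB) for p in paths]
detections = detector.detect_batch(imgs, path_list=paths)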
Example #7
class config:
    resume = r'weights/Jan_net_epoch=8batch=308.pth'
    datasets = r'E:\agent3\lab\switch\JPEGImages'


resnet_features = ResNet18Reduced()
net = JanuaryNet(resnet_features, 3)
net.load_state_dict(torch.load(config.resume, map_location='cpu'))  # CPU inference
net.eval()

datasets = TestDatasets(config.datasets)

num_test = 10  # limit the demo to the first ten images
for img_numpy in itertools.islice(datasets, num_test):
    img_tensor = test_transform(img_numpy)
    img_wrap = img_tensor.unsqueeze(0)

    with torch.no_grad():  # inference only
        loc, conf = net(img_wrap)

    conf_prob = F.softmax(conf, dim=2)        # per-prior class probabilities
    max_value, max_idx = conf_prob[0].max(0)  # most confident prior per class
    print(max_value)
    print(max_idx)
    loc_decoded = decode(loc, net.priors_center_offset)
    h, w, c = img_numpy.shape
    loc_abs = loc_decoded * torch.tensor([w, h, w, h]).float()  # normalized -> pixels

    print(loc_abs[:, max_idx[0]])
    show_detection(img_numpy, loc_abs[:, max_idx[0]])  # box of the top class-0 prior
    print(loc_abs[:, max_idx[1]])
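`decode` turns the network's regression output into boxes anchored at `net.priors_center_offset`. The helper is not shown; below is a sketch of a standard SSD-style decoder, assuming priors stored as (cx, cy, w, h) in normalized coordinates and the usual (0.1, 0.2) variances. The real implementation may differ.

import torch

def decode(loc, priors, variances=(0.1, 0.2)):
    """Hypothetical stand-in: offsets + priors -> normalized corner boxes."""
    boxes = torch.cat([
        priors[:, :2] + loc[..., :2] * variances[0] * priors[:, 2:],  # centers
        priors[:, 2:] * torch.exp(loc[..., 2:] * variances[1]),       # sizes
    ], dim=-1)
    boxes[..., :2] -= boxes[..., 2:] / 2  # (cx, cy) -> (xmin, ymin)
    boxes[..., 2:] += boxes[..., :2]      # (w, h) -> (xmax, ymax)
    return boxes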
Example #8
def main():
    target_columns = [
        'concrete_cement', 'healthy_metal', 'incomplete', 'irregular_metal',
        'other'
    ]

    args = parse_args()
    config = args.train_config
    path = args.path
    model_name = config['train_params']['model']
    model = pydoc.locate(model_name)(**config['train_params']['model_params'])

    predictions = []
    for fold in args.folds:
        fold = int(fold)

        model_name = config['train_params']['name']
        path2weights = os.path.join(path, 'dumps', model_name, str(fold))
        weights = sorted(os.listdir(path2weights))[0]
        path2weights = os.path.join(path2weights, weights)
        print(f'Inference fold: {fold} from checkpoint: {path2weights}')

        model.load_state_dict(torch.load(path2weights)['state_dict'])

        ids = pd.read_csv(os.path.join(path, args.ids))

        if args.verified:
            ids = ids[~ids.verified]
        elif args.mode == 'train':
            ids = ids[ids.fold == fold]

        dataset = TestDataset(
            image_dir=path,
            ids=ids,
            transform=test_transform(
                **config['data_params']['augmentation_params']))

        loader = DataLoader(dataset=dataset,
                            batch_size=config['data_params']['batch_size'],
                            shuffle=False,
                            drop_last=False,
                            num_workers=config['data_params']['num_workers'],
                            pin_memory=torch.cuda.is_available())

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        inferencer = PytorchInference(device, activation='softmax')

        preds = []
        for pred in tqdm(inferencer.predict(model, loader, args.tta),
                         total=len(dataset)):
            preds.append(pred)
        preds = np.array(preds)

        if args.clips is not None:
            preds = np.clip(preds, a_min=args.clips[0], a_max=args.clips[1])

        ids.loc[:, target_columns] = preds
        predictions.append(ids)

    if args.mode == 'train':
        predictions = pd.concat(predictions, axis=0)
    elif args.mode == 'test':
        preds2average = np.mean(
            [pred.loc[:, target_columns].values for pred in predictions],
            axis=0)
        predictions = predictions[0]
        predictions.loc[:, target_columns] = preds2average

    if args.verified:
        save_name = f'predictions_verified_{model_name}.csv'
    else:
        save_name = f'predictions_{args.mode}_{model_name}.csv'

    predictions.to_csv(os.path.join(path, save_name), index=False)
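For completeness: `parse_args` is never shown, and the code indexes `args.train_config` as a dict, so the parser is assumed to load the config file itself. A hypothetical reconstruction of the CLI for Example #8 (all names and defaults are guesses):

import argparse

def parse_args():
    """Hypothetical CLI; train_config is assumed to be loaded into a dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-config', dest='train_config')  # config path/dict
    parser.add_argument('--path')
    parser.add_argument('--folds', nargs='+', default=['0'])
    parser.add_argument('--ids', default='folds.csv')  # csv with ids/fold/verified
    parser.add_argument('--mode', choices=['train', 'test'], default='test')
    parser.add_argument('--verified', action='store_true')
    parser.add_argument('--tta', default=None)
    parser.add_argument('--clips', nargs=2, type=float, default=None)
    return parser.parse_args()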