Code example #1
0
File: inference.py  Project: smikhai1/Medicine-AI
def main():
    """Run test-set inference and save one ``.npz`` prediction per sample.

    The model class and weights come from the config referenced by
    ``--config``; data locations come from the config referenced by
    ``--paths``. Each prediction is written to
    ``<path>/<predictions_path>/<id>.npz``.
    """
    cli = parse_args()
    cfg = get_config(cli.config)
    data_paths = get_config(cli.paths)['data']

    train_cfg = cfg['train_params']
    # The model class is resolved from its dotted import path in the config.
    net = pydoc.locate(train_cfg['model'])(**train_cfg['model_params'])
    checkpoint = torch.load(train_cfg['weights'])
    net.load_state_dict(checkpoint['state_dict'])

    root = Path(data_paths['path'])
    dataset = TestDataset(
        image_dir=root / Path(data_paths['test_images']),
        ids=None,
        transform=test_transform(**cfg['data_params']['augmentation_params']))

    loader = DataLoader(
        dataset=dataset,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        num_workers=16,
        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)

    out_dir = root / Path(data_paths['predictions_path'])
    # Predictions stream out in loader order, which matches dataset.ids.
    for pred, name in tqdm(zip(inferencer.predict(net, loader), dataset.ids),
                           total=len(dataset)):
        np.savez(out_dir / f'{name}.npz', pred)
Code example #2
0
File: true_train.py  Project: amirassov/cft
def main():
    """Train the translation model described by ``--config`` / ``--paths``."""
    cli = parse_args()
    set_global_seeds(666)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    cfg = get_config(cli.config)
    all_paths = get_config(cli.paths)
    data_factory = TranslationFactory(cfg['data_params'],
                                      all_paths['data'],
                                      device=device)

    # Vocabulary-dependent hyper-parameters are only known once the data
    # factory has built its field, so patch them into the config here.
    vocab = data_factory.field.vocab
    model_params = cfg['train_params']['model_params']
    model_params['vocabulary_size'] = len(vocab)
    model_params['pad_id'] = vocab.pad_id
    model_params['sos_id'] = vocab.sos_id
    cfg['train_params']['loss_params']['pad_id'] = vocab.pad_id

    factory = Factory(cfg['train_params'])

    callbacks = create_callbacks(cfg['train_params']['name'],
                                 all_paths['dumps'])
    runner = TranslationRunner(
        stages=cfg['stages'],
        factory=factory,
        callbacks=callbacks,
        device=device,
        meta_data={'vocabulary': vocab, 'config': cfg})

    runner.fit(data_factory)
Code example #3
0
def main():
    """Evaluate a saved translation model on the validation split.

    Prints exact-match accuracy and writes the raw model outputs, one per
    line, to the ``pre_test`` file from the paths config.
    """
    cli = parse_args()
    cfg = get_config(cli.config)
    all_paths = get_config(cli.paths)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    factory = TranslationFactory(cfg['data_params'],
                                 paths=all_paths['data'],
                                 device=device)
    val_data = factory.val_dataset

    net = torch.load(cli.weights).to(device)
    max_len = cfg['data_params']['max_seq_len']
    references, hypotheses = [], []
    for example in tqdm(val_data):
        # `translate` also returns attention weights; they are unused here.
        out_text, _ = translate(net, example.src + ['<eos>'], max_len, device)
        references.append(''.join(example.tgt))
        hypotheses.append(out_text)

    correct = sum(h == r for h, r in zip(hypotheses, references))
    print(f'ACCURACY: {correct / len(references)}')

    data_paths = all_paths['data']
    with open(Path(data_paths['path']) / data_paths['pre_test'], "w") as f:
        f.write('\n'.join(hypotheses))
Code example #4
0
def main():
    """Classify the test set and write ``preds_test.csv`` (fname, argmax class)."""
    cli = parse_args()
    cfg = get_config(cli.config)
    data_paths = get_config(cli.paths)['data']

    train_cfg = cfg['train_params']
    # The model class is resolved from its dotted import path in the config.
    net = pydoc.locate(train_cfg['model'])(**train_cfg['model_params'])
    net.load_state_dict(torch.load(train_cfg['weights'])['state_dict'])

    dataset = TestDataset(
        image_dir=Path(data_paths['path']) / Path(data_paths['test_images']),
        transform=test_transform(**cfg['data_params']['augmentation_params']))

    loader = DataLoader(
        dataset=dataset,
        batch_size=16,
        shuffle=False,
        drop_last=False,
        num_workers=16,
        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)

    rows = []
    # Predictions stream out in loader order, matching dataset.ids.
    for idx, pred in tqdm(enumerate(inferencer.predict(net, loader)),
                          total=len(dataset)):
        label = np.argmax(pred)
        print(label)
        rows.append([dataset.ids[idx], label])

    pd.DataFrame(rows, columns=['fname', 'preds']).to_csv('preds_test.csv')
Code example #5
0
def main():
    """Run segmentation inference over the test set and write a submission CSV.

    Each model output is sigmoided, thresholded at 0.65, resized to
    1024x1024 (nearest-neighbour, to stay binary) and run-length encoded
    into the ``EncodedPixels`` column of the CSV named by
    ``paths['test_images']``; the result is written to
    ``../stage_2_sample_submission.csv`` relative to the data root.
    """
    args = parse_args()
    config = get_config(args.config)
    paths = get_config(args.paths)
    params = config['train_params']
    model_name = config['train_params']['model']
    # The model class is resolved from its dotted import path in the config.
    model = pydoc.locate(model_name)(**params['model_params'])
    model.load_state_dict(torch.load(params['weights'])['state_dict'])
    paths = paths['data']

    dataset = TestDataset(
        path=Path(paths['path']),
        image_csv=pd.read_csv(os.path.join(paths['path'], paths['test_images'])),
        transform=test_transform(**config['data_params']['augmentation_params']))

    loader = DataLoader(
        dataset=dataset,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        num_workers=16,
        pin_memory=torch.cuda.is_available())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)
    torch.set_num_threads(20)
    # The same CSV that backs the dataset is re-read to receive predictions.
    # Row i of this frame is matched to prediction i below, which relies on
    # the loader preserving order (shuffle=False, batch_size=1).
    test_csv = pd.read_csv(os.path.join(paths['path'], paths['test_images']))
    print(os.path.join(paths['path'], paths['test_images']))
    # test_csv['predicted_EncodedPixels'] = None  ####

    ## segmentation __
    i = 0
    for prediction in tqdm(inferencer.predict(model, loader), total=len(dataset)):
        #print(test_csv.loc[0, "EncodedPixels"])
        # Squeeze to 2-D and transpose — assumes a single-channel mask output
        # whose axes need swapping for cv2.resize; TODO confirm model output shape.
        prediction = np.squeeze(prediction).T
        prediction = torch.from_numpy(prediction)
        prediction = torch.nn.Sigmoid()(prediction)
        # Hard binarisation at 0.65 (tuned threshold, see also 0.7 variant below).
        prediction = (prediction > 0.65).float()
        prediction = prediction.numpy()
        # Nearest-neighbour keeps the resized mask strictly binary before RLE.
        mask = mask_to_rle(cv2.resize(prediction, dsize=(1024, 1024), interpolation=cv2.INTER_NEAREST),1024,1024)
        # mask = run_length_encode(cv2.resize(prediction, dsize=(1024, 1024), interpolation=cv2.INTER_NEAREST))
        # Rows already holding the string "-1" are skipped — presumably marked
        # "no mask" by an earlier classification pass; TODO confirm convention.
        if test_csv.loc[i, "EncodedPixels"] != str(-1):
            test_csv.loc[i, "EncodedPixels"] = mask
        i += 1
    # test_csv.to_csv(os.path.join(paths['path'], "../submission_seg_23.csv"), index=False) #train_segmentation_view.csv
    # ## ^^

    # i = 0
    # for prediction in tqdm(inferencer.predict(model, loader), total=len(dataset)):
    #     prediction = torch.from_numpy(prediction)
    #     prediction = torch.squeeze(prediction)
    #     prediction = torch.nn.Sigmoid()(prediction)
    #     prediction = (prediction >= 0.7).float()
    #     if prediction == 0:
    #         test_csv.loc[i, "EncodedPixels"] = -1
    #     i += 1
    test_csv.to_csv(os.path.join(paths['path'], "../stage_2_sample_submission.csv"), index=False)
Code example #6
0
def main():
    """Configure and run one cross-validation-fold training run.

    Reads the experiment config (``--config``) and paths config (``--paths``),
    derives a per-fold experiment name and a versioned save directory, then
    fits a ``Runner`` over the stages declared in the config.
    """
    args = parse_args()
    # set_global_seeds(42)
    config = get_config(args.config)
    print(config, '\n')

    # Namespace the experiment name by fold so per-fold dumps don't collide.
    config['train_params'][
        'name'] = f'{config["train_params"]["name"]}/{args.fold}'
    paths = get_config(args.paths)
    print(paths, '\n')

    if config['train_params']['new_save']:
        # Fresh save: suffix the base save name with the next free index
        # found under this experiment's weights directory.
        name_save = paths["dumps"]["name_save"]
        last_name_save = get_last_save(
            Path(paths["dumps"]["path"]) / paths["dumps"]["weights"] /
            config["train_params"]["name"])
        paths["dumps"]['name_save'] = f'{name_save}_{last_name_save + 1}'
        print(paths["dumps"]['name_save'])
    else:
        # NOTE(review): this reads the top-level ``paths['name_save']`` rather
        # than ``paths['dumps']['name_save']`` — verify the key exists at the
        # root of the paths config, otherwise this branch raises KeyError.
        paths["dumps"]['name_save'] = paths['name_save']

    config['train_params']['name_save'] = paths["dumps"]['name_save']
    config['train_params']['save_dir'] = Path(paths['dumps']['path']) / \
                                         paths['dumps']['weights'] / \
                                         config['train_params']['name']
    factory = Factory(config['train_params'])

    data_factory = TaskDataFactory(
        config['data_params'],
        paths['data'],
        fold=args.fold,
        num_classes=config['train_params']['model_params']['num_classes'])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Checkpointing/monitoring callbacks track the last metric in the list.
    callbacks = create_callbacks(
        name=config['train_params']['name'],
        dumps=paths['dumps'],
        name_save=paths["dumps"]["name_save"],
        monitor_metric=config['train_params']['metrics'][-1])

    trainer = Runner(
        stages=config['stages'],
        factory=factory,
        callbacks=callbacks,
        device=device,
        fold=args.fold,
        num_classes=config['train_params']['model_params']['num_classes'])

    trainer.fit(data_factory)
Code example #7
0
def main():
    """Visualise Grad-CAM heatmaps for one image across all shot classes.

    Saves one ``<class>_gcam_<arch>.png`` per class into ``dst_path`` and
    prints the probability of each class.
    """
    classes = {0: 'long', 1: 'medium', 2: 'closeup', 3: 'detail'}
    topk = len(classes)
    arch = 'resnet34'
    target_layer = 'layer4.2'
    image_path = r'C:\NAIVE\datasets\shot_total_bigger\fixed_train\3_medium\eddi_orel_155.jpg'
    dst_path = r'C:/NAIVE/test_videos/gram_cam/'

    cli = parse_args()
    cfg = get_config(cli.config)
    train_cfg = cfg['train_params']
    # The model class is resolved from its dotted import path in the config.
    net = pydoc.locate(train_cfg['model'])(**train_cfg['model_params'])
    net.load_state_dict(torch.load(train_cfg['weights'])['state_dict'])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(device).eval()

    raw_image = cv2.imread(image_path)
    image = prepare_image(raw_image).unsqueeze(0).to(device)

    gcam = GradCAM(model=net)
    probs, idx = gcam.forward(image)
    idx = idx.cpu().numpy()
    # Classes come back sorted by probability; visualise all of them.
    for rank in range(topk):
        class_id = idx[rank]
        gcam.backward(idx=class_id)
        heatmap = gcam.generate(target_layer=target_layer)
        out_name = '{}_gcam_{}.png'.format(classes[class_id], arch)
        save_gradcam(filename=os.path.join(dst_path, out_name),
                     gcam=heatmap,
                     raw_image=raw_image)
        print('[{:.5f}] {}'.format(probs[rank], classes[class_id]))
Code example #8
0
    class Args:
        # Poor-man's argument object: every field is resolved once, at class
        # creation time, from the config file pointed to by $OPENAI_CONFIG.
        config = get_config(path=os.environ["OPENAI_CONFIG"])

        path = config['path']
        train_config = config['model']
        tta = config['inference']['tta']
        ids = config['inference']['ids']
        clips = config['inference'].get('clipping')  # optional; None if absent

        # One directory per trained fold under <path>/dumps/<experiment name>.
        folds = os.listdir(
            os.path.join(path, 'dumps', train_config['train_params']['name']))
        mode = config['inference']['mode']

        verified = config['inference'].get('verified')  # optional; None if absent
Code example #9
0
            predictions = model(images)
            for prediction in predictions:
                prediction = np.moveaxis(self.to_numpy(prediction), 0, -1)
                yield prediction


def parse_args():
    """Parse the command line: optional ``--config`` and ``--paths`` file paths."""
    parser = argparse.ArgumentParser()
    # Both flags share the same shape: optional string, defaulting to None.
    for flag in ('--config', '--paths'):
        parser.add_argument(flag, type=str, default=None)
    return parser.parse_args()


if __name__ == '__main__':
    # Smoke-test predict_on_batch on the first 16 "closeup" test images.
    cli = parse_args()
    cfg = get_config(cli.config)
    data_paths = get_config(cli.paths)['data']

    train_cfg = cfg['train_params']
    # The model class is resolved from its dotted import path in the config.
    net = pydoc.locate(train_cfg['model'])(**train_cfg['model_params'])
    net.load_state_dict(torch.load(train_cfg['weights'])['state_dict'])

    files = glob.glob('/mnt/hdd1/datasets/naive_data/shot_dataset/test/closeup/*.*')
    batch = np.zeros([16, 224, 224, 3])
    # Fill each slot with a 224x224 resized image, HWC layout.
    for slot in range(batch.shape[0]):
        batch[slot, :, :, :] = imresize(imread(files[slot]), (224, 224))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inferencer = PytorchInference(device)
    preds = inferencer.predict_on_batch(net, batch)