def predict_results(config_types: [type(BaseClassificationTrainConfig)], output_file: str, threshold: float):
    """
    Run an ensemble of classification models over the test set and write one
    binary prediction per image into a CSV submission file.

    :param config_types: train-config classes; one predictor is restored per config
    :param output_file: path of the CSV file to write ("ImageId,EncodedPixels" header)
    :param threshold: score cutoff above which an image is labeled 1
    """
    dataset = create_dataset(is_test=True, include_negatives=True)

    # Make sure the output directory exists before opening the file.
    # (os.path.isdir implies os.path.exists, so the extra isdir check the
    # original carried was redundant.)
    output_dir = os.path.dirname(output_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # One predictor per config, each restored from its own experiment dir
    predictors = []
    for config_type in config_types:
        fsm = FileStructManager(base_dir=config_type.experiment_dir, is_continue=True)
        predictors.append(Predictor(config_type.create_model().cuda(), fsm=fsm))

    with open(output_file, 'w') as out_file:
        out_file.write("ImageId,EncodedPixels\n")
        out_file.flush()

        images_paths = dataset.get_items()
        for i, data in enumerate(tqdm(dataset)):
            data = cv2.resize(data, (512, 512))
            # Scale uint8 [0, 255] pixels to roughly [-1, 1] and add batch/channel dims
            img_tensor = torch.from_numpy(np.expand_dims(np.expand_dims(data.astype(np.float32), 0) / 128 - 1, 0)).cuda()

            # Ensemble by taking the median of the per-model scores
            res = [np.squeeze(predictor.predict({'data': img_tensor}).data.cpu().numpy()) for predictor in predictors]
            res = np.median(res)
            # np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented replacement.
            res = np.where(res > threshold, 1, 0).astype(int)

            out_file.write("{},{}\n".format(os.path.splitext(os.path.basename(images_paths[i]))[0], float(res)))
            out_file.flush()
# --- Example 2 ---
def predict(config_type: type(BaseSegmentationTrainConfig), output_file: str):
    """
    Predict segmentation masks for the test set with a single model and write
    them as RLE strings to a CSV submission file.

    Masks with fewer than 101 positive pixels are treated as empty and written
    as '-1' (the "no mask" convention, matching the ensemble predict()).

    :param config_type: train-config class used to restore the model
    :param output_file: path of the CSV file to write
    """
    dataset = create_dataset(is_test=True,
                             for_segmentation=True,
                             include_negatives=False,
                             indices_path='data/indices/test_seg.npy')

    # Make sure the output directory exists before opening the file
    output_dir = os.path.dirname(output_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    fsm = FileStructManager(base_dir=config_type.experiment_dir,
                            is_continue=True)
    predictor = Predictor(config_type.create_model().cuda(), fsm=fsm)

    with open(output_file, 'w') as out_file:
        out_file.write("ImageId,EncodedPixels\n")
        out_file.flush()

        images_paths = dataset.get_items()
        for i, data in enumerate(tqdm(dataset)):
            data = cv2.resize(data, (512, 512))
            # Scale uint8 [0, 255] pixels to roughly [-1, 1] and add batch/channel dims
            img_tensor = torch.from_numpy(
                np.expand_dims(
                    np.expand_dims(data.astype(np.float32), 0) / 128 - 1,
                    0)).cuda()
            res = np.squeeze(
                predictor.predict({
                    'data': img_tensor
                }).data.cpu().numpy())
            # Suppress low-confidence pixels before counting positives
            res[res < 0.7] = 0

            if res[res > 0].size < 101:
                # Too few positive pixels: report "no mask".  Written as a
                # string (formatted output is identical to the old int -1)
                # for consistency with the ensemble predict().
                rle = '-1'
            else:
                res = (res * 255).astype(np.uint8)
                # Upscale back to the submission resolution
                res = cv2.resize(res, (1024, 1024))
                # Re-binarize: interpolation introduces intermediate values
                res[res > 0] = 255
                rle = mask2rle(res)

            out_file.write("{},{}\n".format(
                os.path.splitext(os.path.basename(images_paths[i]))[0], rle))
            out_file.flush()
def run(config_type: object, out: str):
    """Evaluate every cross-validation fold of an experiment on the test
    split and print the resulting segmentation metrics."""
    dataset = create_augmented_dataset(is_train=False, to_pytorch=True, indices_path='data/indices/test.npy')

    for fold in generate_folds_names(TrainConfig.folds_num):
        fold_dir = os.path.join(config_type.experiment_dir, os.path.splitext(fold['val'])[0])
        fsm = FileStructManager(base_dir=fold_dir, is_continue=True)
        predictor = Predictor(config_type.create_model(False).cuda(), fsm=fsm)

        evaluator = MetricsEval(dataset, predictor, SegmentationMetricsProcessor('eval'))
        evaluator = evaluator.set_data_preprocess(lambda x: torch.from_numpy(np.expand_dims(x, 0)).cuda())
        evaluator = evaluator.set_target_preprocess(lambda x: torch.reshape(x, (1, x.shape[0], x.shape[1], x.shape[2])))
        fold_metrics = evaluator.run().get_metrics()
        print(fold_metrics)
def train(config_type: type(BaseSegmentationTrainConfig)):
    """Train a segmentation model: up to 300 epochs, LR halved after 10
    stale epochs, best states saved, and training stopped once LR < 1e-6."""
    fsm = FileStructManager(base_dir=config_type.experiment_dir, is_continue=False)
    config = config_type({'train': ['train_seg.npy'], 'val': 'val_seg.npy'})

    tensorboard = TensorboardMonitor(fsm, is_continue=False)
    trainer = Trainer(config, fsm, device=torch.device('cuda'))
    trainer.monitor_hub.add_monitor(tensorboard)

    # Shared target for both LR decay and best-state saving
    def mean_val_loss():
        return np.mean(config.val_stage.get_losses())

    trainer.set_epoch_num(300)
    trainer.enable_lr_decaying(coeff=0.5, patience=10, target_val_clbk=mean_val_loss)
    trainer.add_on_epoch_end_callback(lambda: tensorboard.update_scalar('params/lr', trainer.data_processor().get_lr()))
    trainer.enable_best_states_saving(mean_val_loss)
    trainer.add_stop_rule(lambda: trainer.data_processor().get_lr() < 1e-6)

    trainer.train()
def predict_on_test_set(config_type: type(BaseClassificationTrainConfig)):
    """Run a classification model over the held-out test split.

    :return: tuple of (predictions, targets) as NumPy arrays
    """
    dataset = create_dataset(is_test=False, include_negatives=True, indices_path='data/indices/test_class.npy')

    fsm = FileStructManager(base_dir=config_type.experiment_dir, is_continue=True)
    predictor = Predictor(config_type.create_model().cuda(), fsm=fsm)

    predicts = []
    targets = []
    for sample in tqdm(dataset):
        targets.append(ClassificationAugmentations.mask2class(sample['target']))

        img = cv2.resize(sample['data'], (512, 512))
        batch = np.expand_dims(np.expand_dims(img.astype(np.float32), 0) / 128 - 1, 0)
        img_tensor = torch.from_numpy(batch).cuda()
        predicts.append(np.squeeze(predictor.predict({'data': img_tensor}).data.cpu().numpy()))

    return np.array(predicts), np.array(targets)
# --- Example 6 ---
def train(num_epochs=5):
    """Train the HumanBBox model on single-human COCO crops for *num_epochs*
    epochs, logging progress to TensorBoard."""
    fsm = FileStructManager(base_dir="models/UoI/", is_continue=False)
    model = HumanBBox()

    train_data = BBoxDataset(
        "coco/train2017_one_human.csv", size=SZ, type="train", fastai_out=False
    )
    val_data = BBoxDataset(
        "coco/val2017_one_human_train.csv", size=SZ, type="val", fastai_out=False
    )
    train_producer = DataProducer([train_data], batch_size=8, num_workers=5)
    val_producer = DataProducer([val_data], batch_size=4, num_workers=2)

    train_config = TrainConfig(
        [TrainStage(train_producer), ValidationStage(val_producer)],
        IoU,
        torch.optim.SGD(model.parameters(), lr=5e-3, momentum=0.8),
    )

    trainer = Trainer(model, train_config, fsm, torch.device("cuda:0")).set_epoch_num(num_epochs)
    trainer.monitor_hub.add_monitor(TensorboardMonitor(fsm, is_continue=True))
    trainer.train()
def predict(config_types: [type(BaseSegmentationTrainConfig)], output_file: str, class_predicts: {}):
    """Write an RLE submission file from an ensemble of segmentation models.

    Images rejected by the classifier (falsy ``class_predicts`` entry) and
    images whose RLE comes out empty are written as '-1'.
    """
    dataset = create_dataset(is_test=True, include_negatives=False)

    output_dir = os.path.dirname(output_file)
    if not os.path.exists(output_dir) and not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # One predictor per config, each restored from its own experiment dir
    predictors = [
        Predictor(cfg.create_model().cuda(),
                  fsm=FileStructManager(base_dir=cfg.experiment_dir, is_continue=True))
        for cfg in config_types
    ]

    with open(output_file, 'w') as out_file:
        out_file.write("ImageId,EncodedPixels\n")
        out_file.flush()

        images_paths = dataset.get_items()
        for idx, img in enumerate(tqdm(dataset)):
            image_id = os.path.splitext(os.path.basename(images_paths[idx]))[0]
            img = cv2.resize(img, (512, 512))
            batch = np.expand_dims(np.expand_dims(img.astype(np.float32), 0) / 128 - 1, 0)
            img_tensor = torch.from_numpy(batch).cuda()

            # Pixel-wise median over the ensemble, then confidence cutoff
            masks = [np.squeeze(p.predict({'data': img_tensor}).data.cpu().numpy()) for p in predictors]
            mask = np.median(masks, axis=0)
            mask[mask < 0.7] = 0

            if not class_predicts[image_id]:
                rle = '-1'
            else:
                mask = (mask * 255).astype(np.uint8)
                mask = cv2.resize(mask, (1024, 1024))
                mask[mask > 0] = 255
                rle = mask2rle(mask)

                if len(rle) < 1:
                    rle = '-1'

            out_file.write("{},{}\n".format(image_id, rle))
            out_file.flush()
# --- Example 8 ---
        self.model = model
        self.activation = torch.nn.Sigmoid()

    def forward(self, data):
        """Run the wrapped model on *data* and squash its output to (0, 1)
        with the sigmoid activation set up in __init__."""
        return self.activation(self.model(data))


if __name__ == "__main__":
    # CLI: export a trained model to ONNX or libtorch format.
    parser = argparse.ArgumentParser(description='Export model')
    parser.add_argument('-e', '--exp_name', type=str, help='Experiment name', required=False)
    parser.add_argument('-t', '--target', type=str, help='Target format', choices=['onnx', 'libtorch'], required=True)
    parser.add_argument('-o', '--out', type=str, help='Output file path', required=True)

    if len(sys.argv) < 2:
        print('Bad arguments passed', file=sys.stderr)
        parser.print_help(file=sys.stderr)
        # sys.exit is the documented way to terminate; the bare builtin exit()
        # is injected by the site module and may be absent (e.g. python -S).
        sys.exit(2)
    args = parser.parse_args()

    model = create_model().eval()

    if args.exp_name is not None:
        # NOTE(review): the predictor object is never used afterwards —
        # presumably constructing it restores weights into `model` in place;
        # confirm against Predictor's implementation.
        file_struct_manager = FileStructManager(base_dir=MyTrainConfig.experiment_dir, is_continue=True)
        predictor = Predictor(model, file_struct_manager)
    # Wrap with the sigmoid-output adapter before export
    model = Model(model)

    if args.target == 'onnx':
        to_onnx(model, args.out)
    elif args.target == 'libtorch':
        to_libtorch(model, args.out)
# --- Example 9 ---
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    def __init__(self, data_dir: str, is_train: bool):
        """Wrap the MNIST dataset stored under *data_dir*, downloading it if
        missing; *is_train* selects the train or test split."""
        self.dataset = datasets.MNIST(data_dir, train=is_train, download=True)

    def __len__(self):
        """Return the number of samples in the wrapped dataset."""
        sample_count = len(self.dataset)
        return sample_count

    def __getitem__(self, item):
        """Return sample *item* as a ``{'data': ..., 'target': ...}`` dict,
        with the image run through the class transforms."""
        sample, label = self.dataset[item]
        return {'data': self.transforms(sample), 'target': label}


if __name__ == '__main__':
    # Assemble an MNIST training setup: model, data producers and config.
    fsm = FileStructManager(base_dir='data', is_continue=False)
    model = Net()

    train_producer = DataProducer([MNISTDataset('data/dataset', True)],
                                  batch_size=4,
                                  num_workers=2)
    val_producer = DataProducer([MNISTDataset('data/dataset', False)],
                                batch_size=4,
                                num_workers=2)

    stages = [TrainStage(train_producer), ValidationStage(val_producer)]
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.5)
    train_config = TrainConfig(model, stages, torch.nn.NLLLoss(), optimizer)
# --- Example 10 ---
from models import GoogleNet


def predict_img(img: np.ndarray, predictor: Predictor, crops: int = 10):
    """Predict camera pose from *img*, averaging over *crops* random
    299x299 crops.

    :return: (position, quaternion) mean predictions
    """
    cropper = RandomCrop(299, 299)
    crop_stack = np.array([cropper(image=img)['image'] for _ in range(crops)])
    # HWC uint8 -> CHW float batch scaled to [0, 1]
    batch_tensor = torch.from_numpy(
        np.moveaxis(crop_stack.astype(np.float32) / 255., -1, 1))
    output = predictor.predict({'data': batch_tensor})
    pos = np.mean(output[0].cpu().data.numpy().copy(), axis=0)
    qtn = np.mean(output[1].cpu().data.numpy().copy(), axis=0)
    print('position: {}, quaternion: {}'.format(pos, qtn))
    return pos, qtn


if __name__ == "__main__":
    # Usage: python script.py <image_path>
    if len(sys.argv) < 2:
        raise Exception('Missed path to image as argument')

    image = cv2.imread(sys.argv[1])
    net = torch.nn.DataParallel(GoogleNet(inception_v3(pretrained=True)))
    fsm = FileStructManager(base_dir='experiments/exp_last', is_continue=True)
    predictor = Predictor(net,
                          fsm=fsm,
                          from_best_state=False,
                          device=torch.device('cuda'))
    predict_img(image, predictor)