# Example 1
def seed_stack(s: int = 23) -> None:
    """Seed Python's, NumPy's and PyTorch's random number generators with the same value.

    :param s: seed value applied to all three generators.
    """
    from draugr.torch_utilities import torch_seed

    python_seed(s)  # stdlib `random` seeding helper (defined elsewhere in this project)
    numpy_seed(s)  # numpy RNG seeding helper (defined elsewhere in this project)
    torch_seed(s)
# Example 2
    def main(model_name: str = "maskrcnn_pennfudanped", score_threshold=0.55):
        """Run a saved Mask R-CNN on live webcam frames and draw box predictions.

        :param model_name: name used for both the saved-model lookup and the cv2 window title.
        :param score_threshold: minimum sigmoid-transformed score for a detection to be drawn.
        """
        base_path = PROJECT_APP_PATH.user_data / 'maskrcnn'
        dataset_root = Path.home() / "Data"

        torch_seed(3825)

        # The dataset is instantiated only to obtain the category names used when drawing.
        dataset = PennFudanDataset(dataset_root / "PennFudanPed",
                                   Split.Training)
        categories = dataset.categories

        if True:  # NOTE(review): hard-coded toggle — the else branch is unreachable.
            model = load_model(model_name=model_name,
                               model_directory=base_path / 'models')
        else:
            model = get_pretrained_instance_segmentation_maskrcnn(
                dataset.response_channels)

        model.to(global_torch_device())
        cpu_device = torch.device("cpu")  # predictions are moved here before numpy conversion

        with torch.no_grad():
            with TorchEvalSession(model):
                # Stream frames from the default camera (device 0) as device tensors.
                for image in tqdm(
                        to_tensor_generator(
                            frame_generator(cv2.VideoCapture(0)),
                            device=global_torch_device(),
                        )):
                    # Model expects a NCHW float batch; batch dimension added via unsqueeze.
                    prediction = model(
                        # torch_vision_normalize_batch_nchw(
                        uint_hwc_to_chw_float_tensor(image).unsqueeze(0)
                        #    )
                    )[0]

                    (boxes, labels, scores) = (
                        prediction["boxes"].to(cpu_device).numpy(),
                        prediction["labels"].to(cpu_device).numpy(),
                        # NOTE(review): scores are sigmoid-squashed before thresholding —
                        # confirm the model emits raw logits here rather than probabilities.
                        torch.sigmoid(
                            prediction["scores"]).to(cpu_device).numpy(),
                    )

                    indices = scores > score_threshold  # boolean mask over detections

                    cv2.namedWindow(model_name, cv2.WINDOW_NORMAL)
                    cv2.imshow(
                        model_name,
                        draw_bounding_boxes(
                            quick_to_pil_image(image),
                            boxes[indices],
                            labels=labels[indices],
                            scores=scores[indices],
                            categories=categories,
                        ))

                    if cv2.waitKey(1) == 27:
                        break  # esc to quit
# Example 3 — File: mlp.py, Project: pything/agent
        val = torch.cat(ins, dim=-1)
        for i in range(self.num_of_layer):
            val = getattr(self, f"_hidden{i}")(val)

        outs = []
        for i in range(len(self._output_shape)):
            outs.append(getattr(self, f"_out{i}")(val))

        if len(outs) == 1:
            return outs[0]

        return outs


if __name__ == "__main__":
    torch_seed(4)

    def stest_single_dim():
        """Smoke-test an MLP with one 4-dim input head and one 1-dim output head."""
        pos_size = (4, )
        a_size = (1, )
        model = MLP(input_shape=pos_size, output_shape=a_size)

        # Batch of 64 random 4-dim inputs; print the resulting output shape.
        pos_1 = to_tensor(numpy.random.rand(64, pos_size[0]), device="cpu")
        print(model(pos_1)[0].shape)

    def stest_hidden_dim():
        """
def main():
    """Train and/or evaluate a SkipHourglassFission person segmenter on PennFudanPed.

    Controlled by the ``train_model`` flag below: training resumes from a saved
    checkpoint when one exists; evaluation reloads the checkpoint and plots a
    3x3 grid of masks, targets and predictions.
    """
    pyplot.style.use("bmh")
    # BUG FIX: '/Data' is an absolute path, so `Path.home() / "/Data"` resolved
    # to '/Data' and silently discarded the home directory (pathlib drops all
    # preceding components when an absolute part is joined). Use the relative
    # component, matching the other examples in this project.
    base_path = Path.home() / "Data" / "PennFudanPed"

    save_model_path = PROJECT_APP_PATH.user_data / 'models' / "penn_fudan_ped_seg.model"
    train_model = False
    eval_model = not train_model
    SEED = 87539842
    batch_size = 8
    num_workers = 1  # os.cpu_count()
    learning_rate = 0.01
    torch_seed(SEED)

    train_set = PennFudanDataset(base_path, Split.Training)
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)
    valid_loader = DataLoader(
        PennFudanDataset(base_path, Split.Validation),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )

    model = SkipHourglassFission(
        input_channels=train_set.predictor_shape[-1],
        output_heads=(train_set.response_shape[-1], ),
        encoding_depth=1,
    )
    model.to(global_torch_device())

    if train_model:
        # Resume from the last checkpoint when one exists.
        if save_model_path.exists():
            model.load_state_dict(torch.load(str(save_model_path)))
            print("loading saved model")

        with TorchTrainSession(model):
            criterion = BCEDiceLoss(eps=1.0)
            optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate)
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimiser, T_max=7, eta_min=learning_rate / 100, last_epoch=-1)

            model = train_person_segmenter(
                model,
                train_loader,
                valid_loader,
                criterion,
                optimiser,
                scheduler,
                save_model_path,
            )

    if eval_model:
        if save_model_path.exists():
            model.load_state_dict(torch.load(str(save_model_path)))
            print("loading saved model")

        # Evaluation runs on CPU regardless of CUDA availability.
        with TorchDeviceSession(global_torch_device(cuda_if_available=False),
                                model):
            with torch.no_grad():
                with TorchCacheSession():
                    with TorchEvalSession(model):
                        valid_masks = []
                        a = (350, 525)  # (height, width) target size for resized masks
                        tr = min(len(valid_loader.dataset) * 4, 2000)  # cap on samples
                        probabilities = numpy.zeros((tr, *a),
                                                    dtype=numpy.float32)
                        for sample_i, (data, target) in enumerate(
                                tqdm(valid_loader)):
                            data = data.to(global_torch_device())
                            target = target.cpu().detach().numpy()
                            output_batch, *_ = model(data)
                            output_batch = torch.sigmoid(output_batch).cpu().detach().numpy()
                            for p in range(data.shape[0]):
                                output, mask = output_batch[p], target[p]
                                # NOTE(review): the resize/accumulate step below was left
                                # disabled as a bare string literal (a no-op), so
                                # `valid_masks` and `probabilities` stay empty and the
                                # plotting loop below will raise IndexError — restore this
                                # logic before relying on the evaluation output.
                                """
for m in mask:
  valid_masks.append(cv2_resize(m, a))
for probability in output:
  probabilities[sample_i, :, :] = cv2_resize(probability, a)
  sample_i += 1
"""
                                if sample_i >= tr - 1:
                                    break
                            if sample_i >= tr - 1:
                                break

                        f, ax = pyplot.subplots(3, 3, figsize=(24, 12))

                        for i in range(3):
                            ax[0, i].imshow(valid_masks[i], vmin=0, vmax=1)
                            ax[0, i].set_title("Original", fontsize=14)

                            ax[1, i].imshow(valid_masks[i], vmin=0, vmax=1)
                            ax[1, i].set_title("Target", fontsize=14)

                            ax[2, i].imshow(probabilities[i], vmin=0, vmax=1)
                            ax[2, i].set_title("Prediction", fontsize=14)

                        pyplot.show()
    def main():
        """Fine-tune a pretrained Mask R-CNN on PennFudanPed, then visualise one validation prediction.

        Trains for ``num_epochs`` with Adam + StepLR, evaluating and saving the
        model after every epoch, then shows the first validation image, its
        predicted mask, and the thresholded bounding boxes.
        """
        dataset_root = Path.home() / "Data"
        base_path = ensure_existence(PROJECT_APP_PATH.user_data / 'maskrcnn')
        log_path = ensure_existence(PROJECT_APP_PATH.user_log / 'maskrcnn')
        export_root = ensure_existence(base_path / 'models')
        model_name = f'maskrcnn_pennfudanped'  # NOTE(review): f-string with no placeholders

        batch_size = 4
        num_epochs = 10
        optimiser_spec = GDKC(torch.optim.Adam, lr=3e-4)
        scheduler_spec = GDKC(
            torch.optim.lr_scheduler.
            StepLR,  # a learning rate scheduler which decreases the learning rate by
            step_size=3,  # 10x every 3 epochs
            gamma=0.1,
        )
        num_workers = os.cpu_count()
        torch_seed(3825)

        dataset = PennFudanDataset(dataset_root / "PennFudanPed",
                                   Split.Training,
                                   return_variant=ReturnVariant.all)
        dataset_validation = PennFudanDataset(
            dataset_root / "PennFudanPed",
            Split.Validation,
            return_variant=ReturnVariant.all,
        )
        # 70/30 train/validation split over a shared random permutation of indices.
        split = SplitIndexer(len(dataset), validation=0.3, testing=0)

        split_indices = torch.randperm(split.total_num).tolist()

        data_loader = DataLoader(
            Subset(dataset, split_indices[:-split.validation_num]),
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=collate_batch_fn,
        )

        data_loader_val = DataLoader(
            Subset(dataset_validation, split_indices[-split.validation_num:]),
            batch_size=1,
            shuffle=False,
            num_workers=num_workers,
            collate_fn=collate_batch_fn,
        )

        model = get_pretrained_instance_segmentation_maskrcnn(
            dataset.response_channels)
        optimiser = optimiser_spec(trainable_parameters(model))
        lr_scheduler = scheduler_spec(optimiser)

        if True:  # NOTE(review): hard-coded toggle — always overwrites the freshly built model.
            model = load_model(model_name=model_name,
                               model_directory=export_root)

        if True:  # NOTE(review): hard-coded training toggle.
            with TorchTrainSession(model):
                with TensorBoardPytorchWriter(log_path / model_name) as writer:
                    for epoch_i in tqdm(range(num_epochs), desc="Epoch #"):
                        maskrcnn_train_single_epoch(model=model,
                                                    optimiser=optimiser,
                                                    data_loader=data_loader,
                                                    writer=writer)
                        lr_scheduler.step()  # update the learning rate
                        maskrcnn_evaluate(
                            model, data_loader_val, writer=writer
                        )  # evaluate on the validation dataset
                        save_model(model,
                                   model_name=model_name,
                                   save_directory=export_root)

        if True:  # NOTE(review): hard-coded visualisation toggle.
            with TorchEvalSession(model):  # put the model in evaluation mode
                img, _ = dataset_validation[
                    0]  # pick one image from the test set

                with torch.no_grad():
                    prediction = model([img.to(global_torch_device())])

                from matplotlib import pyplot
                # Show the input image (CHW float in [0,1] -> HWC uint8).
                pyplot.imshow(
                    Image.fromarray(
                        img.mul(255).permute(1, 2, 0).byte().numpy()))
                pyplot.show()

                import cv2

                # Show the first predicted instance mask.
                pyplot.imshow(
                    Image.fromarray(prediction[0]["masks"][0, 0].mul(
                        255).byte().cpu().numpy()))
                pyplot.show()

                (boxes, labels, scores) = (
                    prediction[0]["boxes"].to('cpu').numpy(),
                    prediction[0]["labels"].to('cpu').numpy(),
                    # NOTE(review): sigmoid applied to scores — confirm they are logits.
                    torch.sigmoid(prediction[0]["scores"]).to('cpu').numpy(),
                )

                from draugr.opencv_utilities import draw_bounding_boxes
                from draugr.torch_utilities.images.conversion import quick_to_pil_image

                indices = scores > 0.1  # low threshold: keep most detections

                cv2.namedWindow(model_name, cv2.WINDOW_NORMAL)
                cv2.imshow(
                    model_name,
                    draw_bounding_boxes(
                        quick_to_pil_image(img),
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        #categories=categories,
                    ))

                cv2.waitKey()  # block until a key is pressed
def main():
    """Train and evaluate a cloud-segmentation model, then prepare a submission.

    Loads (or resumes) a SkipHourglassFission model, trains with a BCE+Dice
    loss under a cosine-annealing warm-restart schedule, grid-searches
    per-class thresholds on the validation set, visualises two validation
    predictions and finally builds the test-set submission.
    """
    pyplot.style.use("bmh")

    base_dataset_path = Path.home() / "Data" / "Datasets" / "Clouds"
    image_path = base_dataset_path / "resized"

    save_model_path = PROJECT_APP_PATH.user_data / "cloud_seg.model"

    SEED = 87539842
    batch_size = 8
    num_workers = 2
    torch_seed(SEED)

    train_loader = DataLoader(
        CloudSegmentationDataset(base_dataset_path,
                                 image_path,
                                 subset=Split.Training),
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )
    valid_loader = DataLoader(
        CloudSegmentationDataset(base_dataset_path,
                                 image_path,
                                 subset=Split.Validation),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )
    test_loader = DataLoader(
        CloudSegmentationDataset(base_dataset_path,
                                 image_path,
                                 subset=Split.Testing),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )

    model = SkipHourglassFission(
        CloudSegmentationDataset.predictor_channels,
        (CloudSegmentationDataset.response_channels, ),
        encoding_depth=1,
    )
    model.to(global_torch_device())

    # Resume from the last checkpoint when one exists.
    if save_model_path.exists():
        model.load_state_dict(torch.load(
            str(save_model_path)))  # load last model
        print("loading previous model")

    criterion = BCEDiceLoss(eps=1.0)
    lr = 3e-3
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, 7, eta_min=lr / 100, last_epoch=-1)

    # `train_model` here is a project training helper, presumably saving the
    # best checkpoint to `save_model_path` — TODO confirm against its definition.
    model = train_model(
        model,
        train_loader,
        valid_loader,
        criterion,
        optimizer,
        scheduler,
        save_model_path,
    )

    if save_model_path.exists():
        model.load_state_dict(torch.load(
            str(save_model_path)))  # load best model
    model.eval()

    # Per-class (threshold, min component size) found on the validation set.
    class_parameters = threshold_grid_search(model, valid_loader)

    # Visualise predictions for the first two validation batches only.
    for _, (data, target) in zip(range(2), valid_loader):
        data = data.to(global_torch_device(), dtype=torch.float)
        output, *_ = model(data)
        output = torch.sigmoid(output)
        output = output[0].cpu().detach().numpy()
        image_vis = data[0].cpu().detach().numpy()
        mask = target[0].cpu().detach().numpy()

        mask = chw_to_hwc(mask)
        output = chw_to_hwc(output)
        image_vis = float_chw_to_hwc_uint(image_vis)

        pr_mask = numpy.zeros(CloudSegmentationDataset.response_shape)
        for j in range(len(CloudSegmentationDataset.categories)):
            probability_ = output[..., j]
            thr, min_size = class_parameters[j][0], class_parameters[j][1]
            pr_mask[..., j], _ = threshold_mask(probability_, thr, min_size)
        CloudSegmentationDataset.visualise_prediction(
            image_vis,
            pr_mask,
            original_image=image_vis,
            original_mask=mask,
            raw_image=image_vis,
            raw_mask=output,
        )

    prepare_submission(model, class_parameters, test_loader)
# Example 7
def export_detection_model(
        model_export_path: Path = Path("seg_skip_fis"), ) -> None:
    """Export a SkipHourglassFission segmenter as a TorchScript traced module.

    Grabs one webcam frame as the tracing example input, traces the model on
    CPU, and saves the artifact to ``<model_export_path>.traced``. Two further
    frames are used as ``check_inputs`` to validate the trace.

    :param model_export_path: base path for the exported artifact; the
        ``.traced`` suffix is appended on save.
    :return: None
    """

    model = OutputActivationModule(
        SkipHourglassFission(input_channels=3,
                             output_heads=(1, ),
                             encoding_depth=1))

    # Trace on CPU so the exported module is device-agnostic.
    with TorchDeviceSession(
            device=global_torch_device(cuda_if_available=False), model=model):
        with TorchEvalSession(model):
            SEED = 87539842

            torch_seed(SEED)

            # standard PyTorch mean-std input image normalization
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225]),
            ])

            frame_g = frame_generator(cv2.VideoCapture(0))

            for image in tqdm(frame_g):
                # Example input must be a tuple of tensors for torch.jit.trace.
                example_input = (transform(image).unsqueeze(0).to(
                    global_torch_device()), )

                try:
                    traced_script_module = torch.jit.trace(
                        model,
                        example_input,
                        # strict=strict_jit,
                        check_inputs=(
                            transform(next(frame_g)).unsqueeze(0).to(
                                global_torch_device()),
                            transform(next(frame_g)).unsqueeze(0).to(
                                global_torch_device()),
                        ),
                    )
                    exp_path = model_export_path.with_suffix(".traced")
                    traced_script_module.save(str(exp_path))
                    print(
                        f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                    )
                    sprint(
                        f"Successfully exported JIT Traced model at {exp_path}",
                        color="green",
                    )
                except Exception as e_i:
                    sprint(f"Torch JIT Trace export does not work!, {e_i}",
                           color="red")

                break  # only the first frame is needed for tracing
# Example 8
    def __call__(
            self,
            agent: Type[Agent],
            *,
            load_time: Any = str(int(time.time())),  # NOTE(review): default evaluated once at def time
            seed: int = 0,
            save_ending_model: bool = False,
            save_training_resume: bool = False,
            continue_training: bool = True,
            train_agent: bool = True,
            debug: bool = False,
            num_envs: int = cpu_count(),
            **kwargs,
    ):
        """Start a session: build the Agent, seed/connect environment(s), and run the Procedure.

        :param agent: agent class (instantiated here) or a ready instance.
        :param load_time: identifier used for log/checkpoint directory naming.
        :param seed: seed applied to torch and the environment before agent construction.
        :param save_ending_model: persist the agent when the session finishes.
        :param save_training_resume: persist returned training statistics, when present.
        :param continue_training: attempt to load a previously trained model first.
        :param train_agent: train (True) or only evaluate (False) the agent.
        :param debug: enables torchsnooper tracing and autograd anomaly detection.
        :param num_envs: number of parallel environments, forwarded via kwargs.
        :param kwargs: forwarded to agent construction and the procedure call.
        :return: never returns normally — calls ``exit(0)`` at the end of the session.
        """
        # Thread session settings through to the agent/procedure via kwargs.
        kwargs.update(num_envs=num_envs)
        kwargs.update(train_agent=train_agent)
        kwargs.update(debug=debug)
        kwargs.update(environment=self._environment)

        # Both wrappers are no-ops unless debug is True.
        with ContextWrapper(torchsnooper.snoop, debug):
            with ContextWrapper(torch.autograd.detect_anomaly, debug):

                if agent is None:
                    raise NoAgent

                # A class was passed: seed everything, then instantiate it.
                if inspect.isclass(agent):
                    sprint("Instantiating Agent",
                           color="crimson",
                           bold=True,
                           italic=True)
                    torch_seed(seed)
                    self._environment.seed(seed)

                    agent = agent(load_time=load_time, seed=seed, **kwargs)

                agent_class_name = agent.__class__.__name__

                # Encode observation/action/signal space shapes into the run name.
                total_shape = "_".join([
                    str(i)
                    for i in (self._environment.observation_space.shape +
                              self._environment.action_space.shape +
                              self._environment.signal_space.shape)
                ])

                environment_name = f"{self._environment.environment_name}_{total_shape}"

                save_directory = (PROJECT_APP_PATH.user_data /
                                  environment_name / agent_class_name)
                log_directory = (PROJECT_APP_PATH.user_log / environment_name /
                                 agent_class_name / load_time)

                # Pick a rollout plot matching the action space type.
                if self._environment.action_space.is_discrete:
                    rollout_drawer = GDKC(DiscreteScrollPlot,
                                          num_actions=self._environment.
                                          action_space.discrete_steps,
                                          default_delta=None)
                else:
                    rollout_drawer = GDKC(SeriesScrollPlot,
                                          window_length=100,
                                          default_delta=None)

                if train_agent:  # TODO: allow metric writing while not training with flag
                    metric_writer = GDKC(TensorBoardPytorchWriter,
                                         path=log_directory)
                else:
                    metric_writer = GDKC(MockWriter)

                # Writer only active when training; drawer only for a single env.
                with ContextWrapper(metric_writer,
                                    train_agent) as metric_writer:
                    with ContextWrapper(rollout_drawer,
                                        num_envs == 1) as rollout_drawer:

                        agent.build(
                            self._environment.observation_space,
                            self._environment.action_space,
                            self._environment.signal_space,
                            metric_writer=metric_writer,
                        )

                        kwargs.update(
                            environment_name=(
                                self._environment.environment_name, ),
                            save_directory=save_directory,
                            log_directory=log_directory,
                            load_time=load_time,
                            seed=seed,
                            train_agent=train_agent,
                        )

                        found = False
                        if continue_training:
                            sprint(
                                "Searching for previously trained models for initialisation for this configuration "
                                "(Architecture, Action Space, Observation Space, ...)",
                                color="crimson",
                                bold=True,
                                italic=True,
                            )
                            found = agent.load(save_directory=save_directory,
                                               evaluation=not train_agent)
                            if not found:
                                sprint(
                                    "Did not find any previously trained models for this configuration",
                                    color="crimson",
                                    bold=True,
                                    italic=True,
                                )

                        if not train_agent:
                            agent.eval()
                        else:
                            agent.train()

                        if not found:
                            sprint(
                                "Training from new initialisation",
                                color="crimson",
                                bold=True,
                                italic=True,
                            )

                        session_proc = self._procedure(agent, **kwargs)

                        # Run the procedure; early-stop callbacks and Ctrl-C are handled.
                        with CaptureEarlyStop(
                                callbacks=self._procedure.stop_procedure,
                                **kwargs):
                            with StopWatch() as timer:
                                with suppress(KeyboardInterrupt):
                                    training_resume = session_proc(
                                        metric_writer=metric_writer,
                                        rollout_drawer=rollout_drawer,
                                        **kwargs)
                                    if training_resume and "stats" in training_resume and save_training_resume:
                                        training_resume.stats.save(**kwargs)

                        # `timer` is used arithmetically — presumably StopWatch
                        # supports // and % as elapsed seconds; TODO confirm.
                        end_message = f"Training ended, time elapsed: {timer // 60:.0f}m {timer % 60:.0f}s"
                        line_width = 9
                        sprint(
                            f'\n{"-" * line_width} {end_message} {"-" * line_width}\n',
                            color="crimson",
                            bold=True,
                            italic=True,
                        )

                        if save_ending_model:
                            agent.save(**kwargs)

                        # Best-effort close; worker pipes may already be gone.
                        try:
                            self._environment.close()
                        except BrokenPipeError:
                            pass

                        exit(0)  # NOTE(review): terminates the whole process, not just the session