Example #1
def main(is_user: bool = False):
    """ """
    global LOG_WRITER
    if is_user:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.user_log)
            / f"{PROJECT_NAME}_publisher.log"
        )
    else:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.site_log)
            / f"{PROJECT_NAME}_publisher.log"
        )
    LOG_WRITER.open()
    client = mqtt.Client()
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect

    HEIMDALLR_SETTINGS = HeimdallrSettings()  # TODO input scope

    client.username_pw_set(
        HEIMDALLR_SETTINGS.mqtt_username, HEIMDALLR_SETTINGS.mqtt_password
    )
    try:
        client.connect(
            HEIMDALLR_SETTINGS.mqtt_broker, HEIMDALLR_SETTINGS.mqtt_port, keepalive=60
        )
    except ValueError as ve:
        raise ValueError(
            f"{HEIMDALLR_SETTINGS._mqtt_settings_path},"
            f"{HEIMDALLR_SETTINGS.mqtt_broker},"
            f"{HEIMDALLR_SETTINGS.mqtt_port},"
            f"{ve}"
        )
    client.loop_start()

    sensor_data = NOD({HOSTNAME: pull_gpu_info()})
    next_reading = time.time()

    with IgnoreInterruptSignal():
        print("Publisher started")

        for _ in busy_indicator():
            sensor_data[HOSTNAME] = pull_gpu_info()
            s = sensor_data.as_dict()
            s = json.dumps(s)
            client.publish(ALL_CONSTANTS.MQTT_TOPIC, s, ALL_CONSTANTS.MQTT_QOS)
            next_reading += ALL_CONSTANTS.MQTT_PUBLISH_INTERVAL_SEC
            sleep_time = next_reading - time.time()

            if sleep_time > 0:
                time.sleep(sleep_time)

    # noinspection PyUnreachableCode
    LOG_WRITER.close()
    client.loop_stop()
    client.disconnect()
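Note: all of these snippets rely on the same pattern: ensure_existence takes a path, creates it on disk if it is missing, and returns it, so the result can be joined or opened in the same expression. A minimal sketch of just that behaviour, assuming nothing beyond what the examples themselves use:

from pathlib import Path

from apppath import ensure_existence

# Create (if needed) a directory and immediately build a file path inside it,
# mirroring the LogWriter setup in Example #1.
log_dir = ensure_existence(Path.cwd() / "exclude" / "logs")
log_file = log_dir / "example_publisher.log"
print(log_file)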
Example #2
def main(setting_scope: SettingScopeEnum = SettingScopeEnum.user):
    """ """
    global LOG_WRITER
    if setting_scope == SettingScopeEnum.user:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.user_log) /
            f"{PROJECT_NAME}_publisher.log")
    else:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.site_log) /
            f"{PROJECT_NAME}_publisher.log")
    LOG_WRITER.open()
    client = mqtt.Client()
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect

    HEIMDALLR_SETTINGS = HeimdallrSettings(setting_scope)

    client.username_pw_set(HEIMDALLR_SETTINGS.mqtt_username,
                           HEIMDALLR_SETTINGS.mqtt_password)
    try:
        client.connect(HEIMDALLR_SETTINGS.mqtt_broker,
                       HEIMDALLR_SETTINGS.mqtt_port,
                       keepalive=60)
    except ValueError as ve:
        raise ValueError(f"{HEIMDALLR_SETTINGS._mqtt_settings_path},"
                         f"{HEIMDALLR_SETTINGS.mqtt_broker},"
                         f"{HEIMDALLR_SETTINGS.mqtt_port},"
                         f"{ve}")

    client.loop_start()

    sensor_data = NOD({HOSTNAME: pull_gpu_info()})

    if True:  # with IgnoreInterruptSignal():
        print("Publisher started")

        def job():
            """ """
            sensor_data[HOSTNAME] = pull_gpu_info()
            s = sensor_data.as_dict()
            s = json.dumps(s)
            client.publish(ALL_CONSTANTS.MQTT_TOPIC, s, ALL_CONSTANTS.MQTT_QOS)

        schedule.every(ALL_CONSTANTS.MQTT_PUBLISH_INTERVAL_SEC).seconds.do(job)

        for _ in busy_indicator():
            schedule.run_pending()
            time.sleep(1)

    # noinspection PyUnreachableCode
    LOG_WRITER.close()
    client.loop_stop()
    client.disconnect()
Example #3
    def asadsa2():
        """ """
        from draugr.torch_utilities import to_tensor
        from neodroidaudition.data.recognition.libri_speech import LibriSpeech
        from neodroidaudition.noise_generation.gaussian_noise import white_noise
        import torchaudio
        from pathlib import Path

        libri_speech = LibriSpeech(path=Path.home() / "Data" / "Audio" /
                                   "Speech" / "LibriSpeech")
        files, sr = zip(*[(v[0].numpy(), v[1])
                          for _, v in zip(range(1), libri_speech)])
        assert all([sr[0] == s for s in sr[1:]])

        normed = files[0]
        mixed = mix_ratio(normed, normed, 0)
        mixed2 = mix_ratio(mixed, mixed, 0)
        print(normed, mixed)
        print(mixed2, mixed)
        print(root_mean_square(normed))
        print(root_mean_square(mixed))
        print(root_mean_square(mixed2))
        assert numpy.allclose(normed, mixed)
        assert numpy.allclose(mixed2, mixed)
        torchaudio.save(
            str(ensure_existence(Path.cwd() / "exclude") / "mixed_same.wav"),
            to_tensor(mixed),
            int(sr[0]),
        )
Example #4
def compute_additive_noise_samples(*, voice_activity_mask: numpy.ndarray,
                                   signal_file: Path, category, out_dir,
                                   noise_file) -> None:
    sr_noise, noise = wavfile.read(str(noise_file))
    sr_signal, signal = wavfile.read(str(signal_file))

    max_sample = numpy.max(signal)
    signal = signal / max_sample

    noise_part = sample_noise(noise / numpy.max(noise),
                              noise_rate=sr_noise,
                              signal_len=len(signal),
                              signal_rate=sr_signal)

    noise_scaled = noise_part * (root_mean_square(
        mask_split_speech_silence(voice_activity_mask, signal)[0]) /
                                 root_mean_square(noise_part)
                                 )  # scaled by ratio of speech to noise level

    for snr in (i * 5 for i in range(5)):
        noised = signal + noise_scaled / (10**(snr / 20))
        wavfile.write(
            str(
                ensure_existence(
                    out_dir / f'{noise_file.with_suffix("").name}_SNR_{snr}dB'
                    / category) / signal_file.name),
            sr_signal,
            ((noised / numpy.max(noised)) * max_sample).astype(numpy.int16),
        )
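For reference, the scaling above follows the usual decibel relation: the noise is first matched to the RMS of the speech-labelled part of the signal, then divided by 10 ** (snr / 20), since SNR in dB is 20 * log10(rms_signal / rms_noise). A small self-contained sanity check of that relation, with root_mean_square assumed to be a plain RMS:

import numpy


def root_mean_square(x: numpy.ndarray) -> float:
    """Plain RMS, assumed to match the helper used above."""
    return float(numpy.sqrt(numpy.mean(numpy.square(x))))


rng = numpy.random.default_rng(0)
speech = rng.standard_normal(16000)
noise = rng.standard_normal(16000)

snr_db = 10
scaled = noise * (root_mean_square(speech) / root_mean_square(noise))  # 0 dB SNR
attenuated = scaled / (10 ** (snr_db / 20))  # now at the target SNR

measured = 20 * numpy.log10(root_mean_square(speech) / root_mean_square(attenuated))
print(round(measured, 3))  # ~10.0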
Example #5
def main(setting_scope: SettingScopeEnum = SettingScopeEnum.user):
    """ """
    global LOG_WRITER
    if setting_scope == SettingScopeEnum.user:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.user_log) /
            f"{PROJECT_NAME}_server.log")
    else:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.site_log) /
            f"{PROJECT_NAME}_server.log")
    LOG_WRITER.open()
    MQTT_CLIENT.on_message = on_message
    MQTT_CLIENT.on_disconnect = on_disconnect

    CRYSTALLISED_HEIMDALLR_SETTINGS = HeimdallrSettings(setting_scope)
    if True:
        if (CRYSTALLISED_HEIMDALLR_SETTINGS.mqtt_access_token
                and False):  # TODO: not implemented
            pass
            # MQTT_CLIENT.username_pw_set(CRYSTALLISED_HEIMDALLR_SETTINGS.MQTT_ACCESS_TOKEN)
        else:
            MQTT_CLIENT.username_pw_set(
                CRYSTALLISED_HEIMDALLR_SETTINGS.mqtt_username,
                CRYSTALLISED_HEIMDALLR_SETTINGS.mqtt_password,
            )
        MQTT_CLIENT.connect(
            CRYSTALLISED_HEIMDALLR_SETTINGS.mqtt_broker,
            CRYSTALLISED_HEIMDALLR_SETTINGS.mqtt_port,
            keepalive=60,
        )
        MQTT_CLIENT.subscribe(ALL_CONSTANTS.MQTT_TOPIC, ALL_CONSTANTS.MQTT_QOS)

    DASH_APP.title = ALL_CONSTANTS.HTML_TITLE
    DASH_APP.update_title = ALL_CONSTANTS.HTML_TITLE

    DASH_APP.run_server(
        host=ALL_CONSTANTS.SERVER_ADDRESS,
        port=ALL_CONSTANTS.SERVER_PORT,
        debug=ALL_CONSTANTS.DEBUG,
        dev_tools_hot_reload=ALL_CONSTANTS.DEBUG,
    )

    LOG_WRITER.close()
Example #6
    def main():
        """Makes a forward pass to find the category index with the highest score,
        and computes intermediate activations.
        """

        from apppath import ensure_existence

        use_cuda = True
        image_path = str(Path.home() / "Data" / "ok.png")
        model = models.resnet50(pretrained=True)
        grad_cam = GradientClassActivationMapping(
            model=model,
            feature_module=model.layer4,
            target_layer_names=["2"],
            use_cuda=use_cuda,
        )
        """
model = models.resnet18(pretrained=True)
#print(list(model.named_parameters()))
grad_cam = GradientClassActivationMapping(
model=model,
feature_module=model.layer4,
target_layer_names=["1"],
use_cuda=use_cuda,
)
"""
        img = cv2.imread(image_path, 1)
        img = numpy.float32(img) / 255
        # Opencv loads as BGR:
        img = img[:, :, ::-1]
        input_img = preprocess_image(img)

        # If None, returns the map for the highest scoring category.
        # Otherwise, targets the requested category.
        target_category = (518,)
        grayscale_cam = grad_cam(input_img, target_category)

        grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
        cam = overlay_cam_on_image(img, grayscale_cam)

        gb_model = GuidedBackPropReLUModel(model=model, use_cuda=use_cuda)
        gb = gb_model(input_img, target_category=target_category)
        gb = gb.transpose((1, 2, 0))

        cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
        cam_gb = unstandardise_image(cam_mask * gb)
        gb = unstandardise_image(gb)

        exclude = ensure_existence(Path.cwd() / "exclude")

        cv2.imwrite(str(exclude / "cam.jpg"), cam)
        cv2.imwrite(str(exclude / "gb.jpg"), gb)
        cv2.imwrite(str(exclude / "cam_gb.jpg"), cam_gb)
Example #7
def main():
    from configs.mobilenet_v2_ssd320_voc0712 import base_cfg

    # from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
    # from configs.vgg_ssd300_voc0712 import base_cfg

    parser = argparse.ArgumentParser(
        description="Single Shot MultiBox Detector Training With PyTorch"
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--log_step", default=10, type=int, help="Print logs every log_step"
    )
    parser.add_argument(
        "--save_step", default=2500, type=int, help="Save checkpoint every save_step"
    )
    parser.add_argument(
        "--eval_step",
        default=2500,
        type=int,
        help="Evaluate dataset every eval_step, disabled when eval_step < 0",
    )
    parser.add_argument("--use_tensorboard", default=True, type=str2bool)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    set_benchmark_device_dist(args.distributed, args.local_rank)
    logger = setup_distributed_logger(
        "SSD",
        global_distribution_rank(),
        ensure_existence(PROJECT_APP_PATH.user_data / "results")
    )
    logger.info(f"Using {num_gpus} GPUs")
    logger.info(args)
    with TorchCacheSession():
        model = train_ssd(
            base_cfg.data_dir,
            base_cfg,
            base_cfg.solver,
            NOD(**args.__dict__)
        )

    if not args.skip_test:
        logger.info("Start evaluating...")
        do_ssd_evaluation(base_cfg, model, distributed=args.distributed)
Example #8
    def asadsa():
        """ """
        from draugr.torch_utilities import to_tensor
        from neodroidaudition.data.recognition.libri_speech import LibriSpeech
        from neodroidaudition.noise_generation.gaussian_noise import white_noise
        import torchaudio

        from pathlib import Path

        libri_speech = LibriSpeech(
            path=Path.home() / "Data" / "Audio" / "Speech" / "LibriSpeech",
            split=Split.Testing,
        )
        files, sr = zip(*[(v[0].numpy(), v[1])
                          for _, v in zip(range(20), libri_speech)])
        assert all([sr[0] == s for s in sr[1:]])

        mix = files[0]
        for file in files[1:]:
            mix = mix_ratio(mix, file, 0)

        torchaudio.save(
            str(
                ensure_existence(Path.cwd() / "exclude") /
                f"mixed_even_babble.wav"),
            to_tensor(mix),
            int(sr[0]),
        )

        for ratio in range(-20, 20 + 1, 5):
            torchaudio.save(
                str(
                    ensure_existence(Path.cwd() / "exclude") /
                    f"mixed_{ratio}.wav"),
                to_tensor(mix_ratio(files[0], files[-1], ratio)),
                int(sr[0]),
            )
Example #9
    def get_logger(
        path: Path = Path.cwd() / "0.log",
        write_to_std_out: bool = False,
    ) -> logging.Logger:
        """

        :param path:
        :type path:
        :param write_to_std_out:
        :type write_to_std_out:
        :return:
        :rtype:"""
        ensure_existence(path, declare_file=True, overwrite_on_wrong_type=True)

        handlers = [logging.FileHandler(filename=str(path))]

        if write_to_std_out:
            handlers.append(logging.StreamHandler(sys.stdout))

        logging.basicConfig(level=logging.INFO,
                            format="%(message)s",
                            handlers=handlers)

        return logging.getLogger()
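A small usage sketch of the helper above (hypothetical call site, assuming get_logger is importable from the same module); by default it logs to ./0.log and optionally mirrors to stdout:

logger = get_logger(write_to_std_out=True)
logger.info("service started")  # written to ./0.log and echoed to stdout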
Example #10
    def asd7ad() -> None:
        """
        :rtype: None
        """
        from pathlib import Path
        from apppath import ensure_existence
        from matplotlib import pyplot
        import numpy
        import imageio

        n = 200
        n_frames = 25
        x = numpy.linspace(-numpy.pi * 4, numpy.pi * 4, n)
        base = ensure_existence(Path("exclude"))

        def gen():
            """ """
            for i, t in enumerate(numpy.linspace(0, numpy.pi, n_frames)):
                pyplot.plot(x, numpy.cos(x + t))
                pyplot.plot(x, numpy.sin(2 * x - t))
                pyplot.plot(x, numpy.cos(x + t) + numpy.sin(2 * x - t))
                pyplot.ylim(-2.5, 2.5)
                pyplot.savefig(base / f"frame{i}.png", bbox_inches="tight", dpi=300)
                pyplot.clf()

        def asijsd():
            """ """
            files = [base / f"frame{yu}.png" for yu in range(n_frames)]
            frames = [imageio.imread(f) for f in files]
            frames = blit_numbering_raster_sequence(frames)
            imageio.mimsave(base / "output.gif", frames, fps=(n_frames / 2.0))

        def sadasf():
            """ """
            files = [base / f"frame{yu}.png" for yu in range(n_frames)]
            a = [imageio.imread(f) for f in files]
            frames = numpy.array([a, a])  # copy of itself, just for test
            fps = n_frames / 2.0
            frames = numpy.array(
                [blit_fps(blit_numbering_raster_sequence(f), fps) for f in frames]
            )
            [
                imageio.mimsave(base / f"output{i}.gif", f, fps=fps)
                for i, f in enumerate(frames)
            ]

        gen()
        sadasf()
Example #11
    def get_csv_writer(
            path: Path = Path.home() / "Models") -> Tuple[TextIO, Any]:
        """

        :param path:
        :type path:
        :return:
        :rtype:"""
        if path.is_dir() or path.suffix != ".csv":
            path /= "log.csv"
        csv_file = open(
            str(
                ensure_existence(path,
                                 overwrite_on_wrong_type=True,
                                 declare_file=True)),
            mode="a",
        )
        return csv_file, csv.writer(csv_file)
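Hypothetical usage of the helper above; the handle comes back opened in append mode and closing it is left to the caller:

from pathlib import Path

csv_file, writer = get_csv_writer(Path.home() / "Models" / "run_1")  # resolves to .../run_1/log.csv
writer.writerow(("epoch", "loss"))
writer.writerow((1, 0.4213))
csv_file.close()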
Example #12
def resize_children(
    src_path: Union[Path, str],
    size: Union[Tuple[Number, Number], Number],
    dst_path: Union[Path, str] = "resized",
    *,
    from_extensions: Iterable[str] = ("jpg", "png"),
    to_extension: "str" = "jpg",
    resize_method: ResizeMethodEnum = ResizeMethodEnum.scale_crop,
) -> None:
    target_size = (size, size)
    src_path = Path(src_path)
    dst_path = Path(dst_path)
    if not dst_path.root:
        dst_path = src_path.parent / dst_path
    for ext in progress_bar(from_extensions):
        for c in progress_bar(
                src_path.rglob(f'*.{ext.rstrip("*").rstrip(".")}')):
            image = cv2.imread(str(c))
            if resize_method == resize_method.scale:
                resized = cv2_resize(image, target_size,
                                     InterpolationEnum.area)
            elif resize_method == resize_method.crop:
                center = (image.shape[0] / 2, image.shape[1] / 2)
                x = int(center[1] - target_size[0] / 2)
                y = int(center[0] - target_size[1] / 2)
                resized = image[y:y + target_size[1], x:x + target_size[0]]
            elif resize_method == resize_method.scale_crop:
                resized = resize(image, width=target_size[0])
                center = (resized.shape[0] / 2, resized.shape[1] / 2)
                x = int(center[1] - target_size[0] / 2)
                y = int(center[0] - target_size[1] / 2)
                resized = resized[y:y + target_size[1], x:x + target_size[0]]
            else:
                raise NotImplementedError

            target_folder = ensure_existence(
                dst_path.joinpath(*(c.relative_to(src_path).parent.parts)))
            cv2.imwrite(
                str((target_folder / c.name).with_suffix(
                    f'.{to_extension.rstrip("*").rstrip(".")}')),
                resized,
            )
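A hypothetical invocation of resize_children: with these arguments every jpg/png under the source tree is scale-cropped to 224x224 and written as jpg into a sibling "resized" folder that mirrors the original directory structure:

resize_children(
    Path.home() / "Data" / "photos",  # hypothetical source directory
    224,
    from_extensions=("jpg", "png"),
    to_extension="jpg",
    resize_method=ResizeMethodEnum.scale_crop,
)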
Example #13
    def a():
        """ """
        _path_to_events_file = next(
            AppPath("Draugr", "Christian Heider Nielsen").user_log.rglob(
                "events.out.tfevents.*"))
        print(_path_to_events_file)
        tee = TensorboardEventExporter(_path_to_events_file.parent,
                                       save_to_disk=True)
        print(tee.tags_available)
        # tee.export_csv('train_loss')
        # tee.export_line_plot('train_loss')
        # pyplot.show()
        print(tee.export_histogram())
        print(tee.available_scalars)
        print(
            tee.pr_curve_export_csv(
                *tee.tags_available["tensors"],
                out_dir=ensure_existence(Path.cwd() / "exclude"),
            ))
        print(list(iter(tee.TagTypeEnum)))
Example #14
def extract_scalars_as_csv(
    train_path: Path = EXPORT_RESULTS_PATH / "csv" / "training",
    test_path: Path = EXPORT_RESULTS_PATH / "csv" / "testing",
    export_train: bool = True,
    export_test: bool = True,
    verbose: bool = False,
    only_extract_from_latest_event_file: bool = False,
) -> None:
    """
    :param train_path:
    :param test_path:
    :param export_train:
    :param export_test:
    :param verbose:
    :param only_extract_from_latest_event_file:
    """
    if only_extract_from_latest_event_file:
        max_load_time = max(
            list(
                AppPath(
                    "Adversarial Speech", "Christian Heider Nielsen"
                ).user_log.iterdir()
            ),
            key=os.path.getctime,
        )
        unique_event_files_parents = set(
            [ef.parent for ef in max_load_time.rglob("events.out.tfevents.*")]
        )
        event_files = {max_load_time: unique_event_files_parents}
    else:
        event_files = {
            a: set([ef.parent for ef in a.rglob("events.out.tfevents.*")])
            for a in list(
                AppPath(
                    "Adversarial Speech", "Christian Heider Nielsen"
                ).user_log.iterdir()
            )
        }

    for k, v in progress_bar(event_files.items()):
        for e in progress_bar(v):
            relative_path = e.relative_to(k)
            mapping_id, *rest = relative_path.parts
            mapping_id_test = f"{mapping_id}_Test_{relative_path.name}"
            # model_id = relative_path.parent.name could be included but is always the same
            relative_path = Path(*(mapping_id_test, *rest))
            with TensorboardEventExporter(e, save_to_disk=True) as tee:
                if export_test:
                    out_tags = []
                    for tag in progress_bar(TestingScalars):
                        if tag.value in tee.available_scalars:
                            out_tags.append(tag.value)

                    if len(out_tags):
                        tee.scalar_export_csv(
                            *out_tags,
                            out_dir=ensure_existence(
                                test_path / k.name / relative_path,
                                force_overwrite=True,
                                verbose=verbose,
                            ),
                        )
                        print(e)
                    else:
                        if verbose:
                            print(
                                f"{e}, no requested tags found {TestingScalars.__members__.values()}, {tee.available_scalars}"
                            )

                if export_train:
                    out_tags = []
                    for tag in progress_bar(TrainingScalars):
                        if tag.value in tee.available_scalars:
                            out_tags.append(tag.value)

                    if len(out_tags):
                        tee.scalar_export_csv(
                            *out_tags,
                            out_dir=ensure_existence(
                                train_path / k.name / relative_path,
                                force_overwrite=True,
                                verbose=verbose,
                            ),
                        )
                    else:
                        if verbose:
                            print(
                                f"{e}, no requested tags found {TrainingScalars.__members__.values()}, {tee.available_scalars}"
                            )
Example #15
def export_detection_model(
    cfg: NOD,
    model_ckpt: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
    ) -> None:
  """

:param verbose:
:type verbose:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param model_export_path:
:type model_export_path:
:return:
:rtype:
"""
  model = SingleShotDectectionNms(cfg)

  checkpointer = CheckPointer(
      model, save_dir=ensure_existence(PROJECT_APP_PATH.user_data / "results")
      )
  checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
  print(
      f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
      )

  model.post_init()
  model.to(global_torch_device())

  transforms = SSDTransform(
      cfg.input.image_size, cfg.input.pixel_mean, split=Split.Testing
      )
  model.eval()

  pre_quantize_model = False
  if pre_quantize_model:  # Accuracy may drop!
    if True:
      model = quantization.quantize_dynamic(model, dtype=torch.qint8)
    else:
      pass
      # model = quantization.quantize(model)

  frame_g = frame_generator(cv2.VideoCapture(0))
  for image in tqdm(frame_g):
    example_input = (transforms(image)[0].unsqueeze(0).to(global_torch_device()),)
    try:
      traced_script_module = torch.jit.script(
          model,
          # example_input,
          )
      exp_path = model_export_path.with_suffix(".compiled")
      traced_script_module.save(str(exp_path))
      print(f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}")
      sprint(
          f"Successfully exported JIT Traced model at {exp_path}", color="green"
          )
    except Exception as e_i:
      sprint(f"Torch JIT Trace export does not work!, {e_i}", color="red")

    break
Example #16
        null_color="red")  # element wise
    # df.style.bar(subset=['A', 'B'], color='#d65f5f')
    # df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])
    df = df.applymap(color_highlight_extreme)  # .format(None, na_rep="-")
    df = df.apply(color_highlight_extreme, color="darkorange")
    df = df.apply(
        color_highlight_extreme,
        extreme_func=NDFrameExtremeEnum.max,
        color="green",
        axis=None,
    )

    html_ = df.render()
    # html_ = df.to_html()
    # from IPython.display import display, HTML
    # display((HTML(),))
    # print(tabulate(df, headers = 'keys', tablefmt = 'psql'))
    with open(ensure_existence(Path("exclude")) / "style_test.html", "w") as f:
        f.write(html_)
    """
Styler.applymap(func) for elementwise styles

Styler.apply(func, axis=0) for columnwise styles

Styler.apply(func, axis=1) for rowwise styles

Styler.apply(func, axis=None) for tablewise styles

And crucially the input and output shapes of func must match. If x is the input then func(x).shape == x.shape.
"""
Example #17
def export_detection_model(
    cfg: NOD,
    model_checkpoint: Path,
    model_export_path: Path = Path("torch_model"),
    verbose: bool = True,
    onnx_export: bool = False,
    strict_jit: bool = False,
) -> None:
    """

    :param verbose:
    :type verbose:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param model_export_path:
    :type model_export_path:
    :return:
    :rtype:"""
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model.eval()  # Important!

    fuse_quantize_model = False
    if fuse_quantize_model:
        modules_to_fuse = [
            ["conv", "bn", "relu"]
        ]  # Names of modules to fuse, maybe supply directly for architecture class/declaration
        model = torch.quantization.fuse_modules(
            model, modules_to_fuse=modules_to_fuse, inplace=False)

    pre_quantize_model = False
    if pre_quantize_model:  # Accuracy may drop!
        if True:
            model = quantization.quantize_dynamic(model, dtype=torch.qint8)
        else:
            pass
            # model = quantization.quantize(model)

    frame_g = frame_generator(cv2.VideoCapture(0))
    for image in tqdm(frame_g):
        example_input = (transforms(image)[0].unsqueeze(0).to(
            global_torch_device()), )
        try:
            if onnx_export:
                exp_path = model_export_path.with_suffix(".onnx")
                output = onnx.export(
                    model,
                    example_input,
                    str(exp_path),
                    verbose=verbose,
                    # export_params=True,  # store the trained parameter weights inside the model file
                    # opset_version=10,  # the onnx version to export the model to
                    # do_constant_folding=True,  # whether to execute constant folding for optimization
                    # input_names=["input"],  # the model's input names
                    # output_names=["output"],  # the model's output names
                    # dynamic_axes={
                    #  "input": {0: "batch_size"},  # variable length axes
                    #  "output": {0: "batch_size"},
                    #  }
                )
                sprint(f"Successfully exported ONNX model at {exp_path}",
                       color="blue")
            else:
                raise Exception("Just trace instead, ignore exception")
        except Exception as e:
            sprint(f"Torch ONNX export does not work, {e}", color="red")
            try:
                traced_script_module = torch.jit.trace(
                    model,
                    example_input,
                    # strict=strict_jit,
                    check_inputs=(
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                        transforms(next(frame_g))[0].unsqueeze(0).to(
                            global_torch_device()),
                    ),
                )
                exp_path = model_export_path.with_suffix(".traced")
                traced_script_module.save(str(exp_path))
                print(
                    f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                )
                sprint(
                    f"Successfully exported JIT Traced model at {exp_path}",
                    color="green",
                )
            except Exception as e_i:
                sprint(f"Torch JIT Trace export does not work!, {e_i}",
                       color="red")

        break
Example #18
def run_demo(cfg, class_names, model_ckpt, score_threshold, images_dir,
             output_dir):
    model = SingleShotDectection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
    print(
        f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    image_paths = list(images_dir.iterdir())

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=Split.Testing)
    model.eval()

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = numpy.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(global_torch_device()))[0]
        inference_time = time.time() - start

        result.boxes[:, 0::2] *= width / result.img_width
        result.boxes[:, 1::2] *= height / result.img_height
        (boxes, labels, scores) = (
            result.boxes.to(cpu_device).numpy(),
            result.labels.to(cpu_device).numpy(),
            result.scores.to(cpu_device).numpy(),
        )

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[
            indices]
        meters = " | ".join([
            f"objects {len(boxes):02d}",
            f"load {round(load_time * 1000):03d}ms",
            f"inference {round(inference_time * 1000):03d}ms",
            f"FPS {round(1.0 / inference_time)}",
        ])
        print(f"({i + 1:04d}/{len(image_paths):04d}) {image_name}: {meters}")

        drawn_image = draw_bounding_boxes(
            image,
            boxes,
            labels,
            scores,
            class_names,
            score_font=ImageFont.truetype(
                PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                24,
            ),
        ).astype(numpy.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
Example #19
    def main():
        dataset_root = Path.home() / "Data"
        base_path = ensure_existence(PROJECT_APP_PATH.user_data / 'maskrcnn')
        log_path = ensure_existence(PROJECT_APP_PATH.user_log / 'maskrcnn')
        export_root = ensure_existence(base_path / 'models')
        model_name = f'maskrcnn_pennfudanped'

        batch_size = 4
        num_epochs = 10
        optimiser_spec = GDKC(torch.optim.Adam, lr=3e-4)
        scheduler_spec = GDKC(
            torch.optim.lr_scheduler.
            StepLR,  # a learning rate scheduler which decreases the learning rate by
            step_size=3,  # 10x every 3 epochs
            gamma=0.1,
        )
        num_workers = os.cpu_count()
        torch_seed(3825)

        dataset = PennFudanDataset(dataset_root / "PennFudanPed",
                                   Split.Training,
                                   return_variant=ReturnVariant.all)
        dataset_validation = PennFudanDataset(
            dataset_root / "PennFudanPed",
            Split.Validation,
            return_variant=ReturnVariant.all,
        )
        split = SplitIndexer(len(dataset), validation=0.3, testing=0)

        split_indices = torch.randperm(split.total_num).tolist()

        data_loader = DataLoader(
            Subset(dataset, split_indices[:-split.validation_num]),
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=collate_batch_fn,
        )

        data_loader_val = DataLoader(
            Subset(dataset_validation, split_indices[-split.validation_num:]),
            batch_size=1,
            shuffle=False,
            num_workers=num_workers,
            collate_fn=collate_batch_fn,
        )

        model = get_pretrained_instance_segmentation_maskrcnn(
            dataset.response_channels)
        optimiser = optimiser_spec(trainable_parameters(model))
        lr_scheduler = scheduler_spec(optimiser)

        if True:
            model = load_model(model_name=model_name,
                               model_directory=export_root)

        if True:
            with TorchTrainSession(model):
                with TensorBoardPytorchWriter(log_path / model_name) as writer:
                    for epoch_i in tqdm(range(num_epochs), desc="Epoch #"):
                        maskrcnn_train_single_epoch(model=model,
                                                    optimiser=optimiser,
                                                    data_loader=data_loader,
                                                    writer=writer)
                        lr_scheduler.step()  # update the learning rate
                        maskrcnn_evaluate(
                            model, data_loader_val, writer=writer
                        )  # evaluate on the validation dataset
                        save_model(model,
                                   model_name=model_name,
                                   save_directory=export_root)

        if True:
            with TorchEvalSession(model):  # put the model in evaluation mode
                img, _ = dataset_validation[
                    0]  # pick one image from the test set

                with torch.no_grad():
                    prediction = model([img.to(global_torch_device())])

                from matplotlib import pyplot
                pyplot.imshow(
                    Image.fromarray(
                        img.mul(255).permute(1, 2, 0).byte().numpy()))
                pyplot.show()

                import cv2

                pyplot.imshow(
                    Image.fromarray(prediction[0]["masks"][0, 0].mul(
                        255).byte().cpu().numpy()))
                pyplot.show()

                (boxes, labels, scores) = (
                    prediction[0]["boxes"].to('cpu').numpy(),
                    prediction[0]["labels"].to('cpu').numpy(),
                    torch.sigmoid(prediction[0]["scores"]).to('cpu').numpy(),
                )

                from draugr.opencv_utilities import draw_bounding_boxes
                from draugr.torch_utilities.images.conversion import quick_to_pil_image

                indices = scores > 0.1

                cv2.namedWindow(model_name, cv2.WINDOW_NORMAL)
                cv2.imshow(
                    model_name,
                    draw_bounding_boxes(
                        quick_to_pil_image(img),
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        #categories=categories,
                    ))

                cv2.waitKey()
Example #20
def run_webcam_demo(
    cfg: NOD,
    categories: Sequence[str],
    model_checkpoint: Path,
    score_threshold: float = 0.5,
    window_name: str = "SSD",
):
    """

    :param categories:
    :type categories:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param score_threshold:
    :type score_threshold:
    :param window_name:
    :type window_name:
    :return:
    :rtype:"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for infos in tqdm(DictUnityEnvironment(connect_to_running=True)):
            info = next(iter(infos.values()))
            new_images = extract_all_cameras(info)
            image = next(iter(new_images.values()))[..., :3][..., ::-1]
            image = gamma_correct_float_to_byte(image)
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))[0]
            height, width, *_ = image.shape

            result["boxes"][:, 0::2] *= width / result["img_width"]
            result["boxes"][:, 1::2] *= height / result["img_height"]
            (boxes, labels, scores) = (
                result["boxes"].to(cpu_device).numpy(),
                result["labels"].to(cpu_device).numpy(),
                result["scores"].to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            if show_image(
                    draw_bounding_boxes(
                        image,
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        categories=categories,
                    ).astype(numpy.uint8),
                    window_name,
                    wait=1,
            ):
                break  # esc to quit
Example #21
    x_c = [r[0] for r in digits[i][1]]
    y_c = [r[1] for r in digits[i][1]]
    z_c = [r[2] for r in digits[i][1]]

    layout = Layout(height=500,
                    width=600,
                    title=f"Digit: {str(digits[i][2])} in 3D space")
    fig = Figure(
        data=[
            Scatter3d(
                x=x_c,
                y=y_c,
                z=z_c,
                mode="markers",
                marker={
                    "size": 12,
                    "color": z_c,
                    "colorscale": "Viridis",
                    "opacity": 0.7,
                },
            )
        ],
        layout=layout,
    )
    plotly.offline.plot(
        fig,
        filename=str(
            ensure_existence(PROJECT_APP_PATH.user_cache / "mnist3d") /
            "temp-plot.html"),
    )
Example #22
def export_detection_model(
    model_export_path: Path = ensure_existence(
        PROJECT_APP_PATH.user_data / "penn_fudan_segmentation"
    )
    / "seg_skip_fis",
    SEED: int = 87539842,
) -> None:
    """

    :param model_export_path:
    :type model_export_path:
    :return:
    :rtype:"""

    model = OutputActivationModule(
        SkipHourglassFission(input_channels=3, output_heads=(1,), encoding_depth=1)
    )

    with TorchDeviceSession(device=global_torch_device("cpu"), model=model):
        with TorchEvalSession(model):

            seed_stack(SEED)

            # standard PyTorch mean-std input image normalization
            transform = transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )

            frame_g = frame_generator(cv2.VideoCapture(0))

            for image in tqdm(frame_g):
                example_input = (
                    transform(image).unsqueeze(0).to(global_torch_device()),
                )

                try:
                    traced_script_module = torch.jit.trace(
                        model,
                        example_input,
                        # strict=strict_jit,
                        check_inputs=(
                            transform(next(frame_g))
                            .unsqueeze(0)
                            .to(global_torch_device()),
                            transform(next(frame_g))
                            .unsqueeze(0)
                            .to(global_torch_device()),
                        ),
                    )
                    exp_path = model_export_path.with_suffix(".traced")
                    traced_script_module.save(str(exp_path))
                    print(
                        f"Traced Ops used {torch.jit.export_opnames(traced_script_module)}"
                    )
                    sprint(
                        f"Successfully exported JIT Traced model at {exp_path}",
                        color="green",
                    )
                except Exception as e_i:
                    sprint(f"Torch JIT Trace export does not work!, {e_i}", color="red")

                break
Example #23
    def __init__(self, config, data_loader):
        """
Construct a new Trainer instance.

Args:
    config: object containing command line arguments.
    data_loader: A data iterator.
"""
        self.config = config

        if config.use_gpu and torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

        # glimpse network params
        self.patch_size = config.patch_size
        self.glimpse_scale = config.glimpse_scale
        self.num_patches = config.num_patches
        self.loc_hidden = config.loc_hidden
        self.glimpse_hidden = config.glimpse_hidden

        # core network params
        self.num_glimpses = config.num_glimpses
        self.hidden_size = config.hidden_size

        # reinforce params
        self.std = config.std
        self.M = config.M

        # data params
        if config.is_train:
            self.train_loader = data_loader[0]
            self.valid_loader = data_loader[1]
            self.num_train = len(self.train_loader.sampler.indices)
            self.num_valid = len(self.valid_loader.sampler.indices)
        else:
            self.test_loader = data_loader
            self.num_test = len(self.test_loader.dataset)
        self.num_classes = 10
        self.num_channels = 1

        # training params
        self.epochs = config.epochs
        self.start_epoch = 0
        self.momentum = config.momentum
        self.lr = config.init_lr

        # misc params
        self.model_name = f"ram_{config.num_glimpses}_{config.patch_size}x{config.patch_size}_{config.glimpse_scale}"
        self.best = config.best
        self.ckpt_dir = config.ckpt_dir
        self.logs_dir = config.logs_dir
        self.plot_dir = config.plot_dir / self.model_name
        self.best_valid_acc = 0.0
        self.counter = 0
        self.lr_patience = config.lr_patience
        self.train_patience = config.train_patience
        self.use_tensorboard = config.use_tensorboard
        self.resume = config.resume
        self.print_freq = config.print_freq
        self.plot_freq = config.plot_freq

        ensure_existence(self.ckpt_dir)
        ensure_existence(self.logs_dir)
        ensure_existence(self.plot_dir)

        # configure tensorboard logging
        if self.use_tensorboard:
            tensorboard_dir = self.logs_dir / self.model_name
            print(f"[*] Saving tensorboard logs to {tensorboard_dir}")
            if not os.path.exists(tensorboard_dir):
                os.makedirs(tensorboard_dir)
            configure(tensorboard_dir)

        self.model = RecurrentAttention(
            self.patch_size,
            self.num_patches,
            self.glimpse_scale,
            self.num_channels,
            self.loc_hidden,
            self.glimpse_hidden,
            self.std,
            self.hidden_size,
            self.num_classes,
        ).to(self.device)

        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.config.init_lr)
        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           "min",
                                           patience=self.lr_patience)
Example #24
def main(
    base_path: Path = Path.home() / "Data" / "Datasets" / "PennFudanPed",
    train_model: bool = True,
    load_prev_model: bool = True,
    writer: Writer = TensorBoardPytorchWriter(PROJECT_APP_PATH.user_log /
                                              "instanced_person_segmentation" /
                                              f"{time.time()}"),
):
    """ """

    # base_path = Path("/") / "encrypted_disk" / "heider" / "Data" / "PennFudanPed"
    base_path: Path = Path.home() / "Data3" / "PennFudanPed"
    # base_path = Path('/media/heider/OS/Users/Christian/Data/Datasets/')  / "PennFudanPed"
    pyplot.style.use("bmh")

    save_model_path = (
        ensure_existence(PROJECT_APP_PATH.user_data / "models") /
        "instanced_penn_fudan_ped_seg.model")

    eval_model = not train_model
    SEED = 9221
    batch_size = 32
    num_workers = 0
    encoding_depth = 2
    learning_rate = 6e-6  # sequence 6e-2 6e-3 6e-4 6e-5

    seed_stack(SEED)

    train_set = PennFudanDataset(
        base_path,
        SplitEnum.training,
        return_variant=PennFudanDataset.PennFudanReturnVariantEnum.instanced,
    )

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)
    valid_loader = DataLoader(
        PennFudanDataset(
            base_path,
            SplitEnum.validation,
            return_variant=PennFudanDataset.PennFudanReturnVariantEnum.
            instanced,
        ),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )

    model = SkipHourglassFission(
        input_channels=train_set.predictor_shape[-1],
        output_heads=(train_set.response_shape[-1], ),
        encoding_depth=encoding_depth,
    )
    model.to(global_torch_device())

    if load_prev_model and save_model_path.exists():
        model.load_state_dict(torch.load(str(save_model_path)))
        print("loading saved model")

    if train_model:
        with TorchTrainSession(model):
            criterion = BCEDiceLoss()
            # optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate)
            optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)
            # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(                optimiser, T_max=7, eta_min=learning_rate / 100, last_epoch=-1            )

            model = train_person_segmentor(
                model,
                train_loader,
                valid_loader,
                criterion,
                optimiser,
                save_model_path=save_model_path,
                learning_rate=learning_rate,
                writer=writer,
            )

    if eval_model:
        validate_model(model, valid_loader)
Example #25
def get_calender_df(calendar_id: str,
                    credentials_base_path: Path,
                    num_entries: int = 30) -> pandas.DataFrame:
    """Shows basic usage of the Google Calendar API.
    Returns the start time and summary of the next num_entries events on the
    given calendar as a pandas DataFrame.

    :param calendar_id:
    :param credentials_base_path:
    :param num_entries:
    :rtype: pandas.DataFrame
    """
    credentials = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.

    tokens_path = ensure_existence(
        credentials_base_path / "google") / "token.pickle"
    if tokens_path.exists():
        with open(tokens_path, "rb") as token:
            credentials = pickle.load(token)

    if not credentials or not credentials.valid:
        # If there are no (valid) credentials available, let the user log in.
        if credentials and credentials.expired and credentials.refresh_token:
            credentials.refresh(Request())
        else:
            a = ensure_existence(
                credentials_base_path / "google") / "credentials.json"
            if a.exists():
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(a), SCOPES)
                credentials = flow.run_local_server()
            else:
                if False:
                    raise Exception(f"Missing {a}")
                else:
                    print(f"Missing {a}")

        with open(tokens_path, "wb") as token:
            # Save the credentials for the next run
            pickle.dump(credentials, token)

    df = pandas.DataFrame(columns=("start", "summary"))
    if credentials:
        service = build("calendar", "v3", credentials=credentials)

        # Call the Calendar API

        events_result = (
            service.events().list(
                calendarId=calendar_id,
                timeMin=
                f"{datetime.datetime.utcnow().isoformat()}Z",  # 'Z' indicates UTC time
                maxResults=num_entries,
                singleEvents=True,
                orderBy="startTime",
            ).execute())
        events = events_result.get("items", [])

        if not events:
            print("No upcoming events found.")
        else:
            for event in events:
                start = event["start"].get("dateTime",
                                           event["start"].get("date"))
                if "summary" in event:
                    df.loc[-1] = (start, event["summary"])  # 'summary'
                    df.index = df.index + 1
                    df = df.sort_index(ascending=False)

        df.start = df.start.map(iso_dt_to_datetime)

    return df
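A hypothetical call of the function above; "primary" is Google Calendar's identifier for the account's default calendar, and the credentials base path is wherever the google/credentials.json and google/token.pickle pair described in the flow is kept:

events_df = get_calender_df(
    calendar_id="primary",
    credentials_base_path=Path.home() / ".config" / "heimdallr",  # hypothetical location
    num_entries=10,
)
print(events_df)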
Example #26
    CONFIG.latent_size = 10
    CONFIG.print_every = 100
    GLOBAL_DEVICE = global_torch_device()
    TIMESTAMP = time.time()

    LOWEST_L = inf

    CORE_COUNT = 0  # min(8, multiprocessing.cpu_count() - 1)

    GLOBAL_DEVICE = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    DL_KWARGS = ({
        "num_workers": CORE_COUNT,
        "pin_memory": True
    } if torch.cuda.is_available() else {})
    BASE_PATH = ensure_existence(PROJECT_APP_PATH.user_data / "cvae")

    torch.manual_seed(CONFIG.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(CONFIG.seed)

    NAME = "MNIST"
    MODEL = ConditionalVAE(
        encoder_layer_sizes=CONFIG.encoder_layer_sizes,
        latent_size=CONFIG.latent_size,
        decoder_layer_sizes=CONFIG.decoder_layer_sizes,
        num_conditions=10,
    ).to(global_torch_device())
    DATASET = MNIST(
        root=str(PROJECT_APP_PATH.user_data / NAME),
        train=True,
Example #27
    def __init__(self,
                 setting_scope: SettingScopeEnum = SettingScopeEnum.site):
        """Protects from overriding on initialisation"""
        pass
        # super().__init__()
        # TODO: FIGURE OUT A WAY TO EASILY COPY SETTINGS TO ROOT; for services
        # print(f'Using settings from {PROJECT_APP_PATH.user_config}')

        _setting_scope = setting_scope
        if setting_scope == SettingScopeEnum.user:
            HeimdallrSettings._credentials_base_path = ensure_existence(
                PROJECT_APP_PATH.user_config / "credentials")
            HeimdallrSettings._google_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.user_config) /
                "google.settings")
            HeimdallrSettings._mqtt_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.user_config) /
                "mqtt.settings")

            # print(f'Using config at {PROJECT_APP_PATH.site_config}')
        elif setting_scope == SettingScopeEnum.site:
            prev_val = PROJECT_APP_PATH._ensure_existence
            PROJECT_APP_PATH._ensure_existence = False
            HeimdallrSettings._credentials_base_path = (
                PROJECT_APP_PATH.site_config / "credentials")
            if not HeimdallrSettings._credentials_base_path.exists():
                with sh.contrib.sudo(
                        password=getpass.getpass(
                            prompt=f"[sudo] password for {getpass.getuser()}: "
                        ),
                        _with=True,
                ):
                    sh.mkdir(["-p", HeimdallrSettings._credentials_base_path])
                    sh.chown(
                        f"{getpass.getuser()}:", PROJECT_APP_PATH.site_config
                    )  # If a colon but no group name follows the user name, that user is made the owner of the files and the group of the files is changed to that user's login group.
            PROJECT_APP_PATH._ensure_existence = prev_val
            HeimdallrSettings._google_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.site_config) /
                "google.settings")
            HeimdallrSettings._mqtt_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.site_config) /
                "mqtt.settings")

            # print(f'Using config at {PROJECT_APP_PATH.site_config}')
        elif setting_scope == SettingScopeEnum.root:
            prev_val = PROJECT_APP_PATH._ensure_existence
            PROJECT_APP_PATH._ensure_existence = False
            HeimdallrSettings._credentials_base_path = (
                PROJECT_APP_PATH.root_config / "credentials")
            if not HeimdallrSettings._credentials_base_path.exists():
                with sh.contrib.sudo(
                        password=getpass.getpass(
                            prompt=f"[sudo] password for {getpass.getuser()}: "
                        ),
                        _with=True,
                ):
                    sh.mkdir(["-p", HeimdallrSettings._credentials_base_path])
                    sh.chown(
                        f"{getpass.getuser()}:", PROJECT_APP_PATH.root_config
                    )  # If a colon but no group name follows the user name, that user is made the owner of the files and the group of the files is changed to that user's login group.
            PROJECT_APP_PATH._ensure_existence = prev_val
            HeimdallrSettings._google_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.root_config) /
                "google.settings")
            HeimdallrSettings._mqtt_settings_path = str(
                ensure_existence(PROJECT_APP_PATH.root_config) /
                "mqtt.settings")

            # print(f'Using config at {PROJECT_APP_PATH.site_config}')
        else:
            raise ValueError()
Example #28
def train_mnist(load_earlier=False, train=True, denoise: bool = True):
    """

    :param load_earlier:
    :type load_earlier:
    :param train:
    :type train:"""
    seed = 251645
    batch_size = 32

    tqdm.monitor_interval = 0
    learning_rate = 3e-3
    lr_sch_step_size = int(10e4 // batch_size)
    lr_sch_gamma = 0.1
    unet_depth = 3
    unet_start_channels = 16
    input_channels = 1
    output_channels = (input_channels,)

    home_path = PROJECT_APP_PATH
    model_file_ending = ".model"
    model_base_path = ensure_existence(PROJECT_APP_PATH.user_data / "unet_mnist")
    interrupted_name = "INTERRUPTED_BEST"
    interrupted_path = model_base_path / f"{interrupted_name}{model_file_ending}"

    torch.manual_seed(seed)

    device = global_torch_device()

    img_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            MinMaxNorm(),
            transforms.Lambda(lambda tensor: torch.round(tensor)),
            # transforms.RandomErasing()
        ]
    )
    dataset = MNIST(
        PROJECT_APP_PATH.user_data / "mnist", transform=img_transform, download=True
    )
    data_iter = iter(
        cycle(DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True))
    )
    data_iter = to_device_iterator(data_iter, device)

    model = SkipHourglassFission(
        input_channels=input_channels,
        output_heads=output_channels,
        encoding_depth=unet_depth,
        start_channels=unet_start_channels,
    ).to(global_torch_device())

    optimiser_ft = optim.Adam(model.parameters(), lr=learning_rate)

    exp_lr_scheduler = optim.lr_scheduler.StepLR(
        optimiser_ft, step_size=lr_sch_step_size, gamma=lr_sch_gamma
    )

    if load_earlier:
        _list_of_files = list(
            model_base_path.rglob(f"{interrupted_name}{model_file_ending}")
        )
        if not len(_list_of_files):
            print(
                f"found no trained models under {model_base_path}{os.path.sep}**{os.path.sep}{interrupted_name}{model_file_ending}"
            )
            exit(1)
        latest_model_path = str(max(_list_of_files, key=os.path.getctime))
        print(f"loading previous model: {latest_model_path}")
        if latest_model_path is not None:
            model.load_state_dict(torch.load(latest_model_path))

    if train:
        with TensorBoardPytorchWriter(home_path.user_log / str(time.time())) as writer:
            model = training(
                model,
                data_iter,
                optimiser_ft,
                exp_lr_scheduler,
                writer,
                interrupted_path,
                denoise=denoise,
            )
            torch.save(
                model.state_dict(),
                model_base_path / f"unet_mnist_final{model_file_ending}",
            )
    else:
        inference(model, data_iter, denoise=denoise)

    torch.cuda.empty_cache()
Example #29
def run_webcam_demo(
    cfg: NOD,
    input_cfg: NOD,
    categories: List,
    model_ckpt: Path,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
):
    """

:param categories:
:type categories:
:param cfg:
:type cfg:
:param model_ckpt:
:type model_ckpt:
:param score_threshold:
:type score_threshold:
:param window_name:
:type window_name:
:return:
:rtype:
"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(input_cfg.image_size,
                              input_cfg.pixel_mean,
                              split=Split.Testing)
    model = SingleShotDectectionNms(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_ckpt, use_latest=model_ckpt is None)
    print(
        f"Loaded weights from {model_ckpt if model_ckpt else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for image in tqdm(frame_generator(cv2.VideoCapture(0))):
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))
            height, width, *_ = image.shape

            result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
            result.boxes[:, 1::2] *= height / result.img_height.cpu().item()
            (boxes, labels, scores) = (
                result.boxes.to(cpu_device).numpy(),
                result.labels.to(cpu_device).numpy(),
                result.scores.to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.imshow(
                window_name,
                draw_bounding_boxes(
                    image,
                    boxes[indices],
                    labels=labels[indices],
                    scores=scores[indices],
                    categories=categories,
                    score_font=ImageFont.truetype(
                        PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                        24,
                    ),
                ).astype(numpy.uint8),
            )
            if cv2.waitKey(1) == 27:
                break  # esc to quit
Example #30
from pathlib import Path

from matplotlib import pyplot

from apppath import ensure_existence
from draugr import PROJECT_APP_PATH
from draugr.tensorboard_utilities import TensorboardEventExporter
from draugr.writers import TrainingScalars

if __name__ == "__main__":
    save = False
    event_files = list(
        PROJECT_APP_PATH.user_log.rglob("events.out.tfevents.*"))
    if len(event_files) > 0:
        for _path_to_events_file in event_files:
            print(f"Event file: {_path_to_events_file}")
            _out_dir = Path.cwd() / "exclude" / "results"
            ensure_existence(_out_dir)
            tee = TensorboardEventExporter(_path_to_events_file.parent,
                                           save_to_disk=save)
            print(f"Available tags: {tee.tags_available}")
            tee.export_line_plot(TrainingScalars.training_loss.value,
                                 out_dir=_out_dir)
            if not save:
                pyplot.show()
    else:
        print("No events found")