Example No. 1
def calculate_loss(seg, recon, depth, normals):
  ((seg_pred, seg_target),
   (recon_pred, recon_target),
   (depth_pred, depth_target),
   (normals_pred, normals_target)) = (seg, recon, depth, normals)

  seg_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(seg_pred, seg_target)
  ae_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(recon_pred, recon_target)
  normals_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(normals_pred, normals_target)
  depth_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(depth_pred, depth_target)

  pred_soft = torch.sigmoid(seg_pred)
  dice = dice_loss(pred_soft, seg_target, epsilon=1)
  jaccard = jaccard_loss(pred_soft, seg_target, epsilon=1)

  terms = NOD.dict_of(dice,
                      jaccard,
                      ae_bce_loss,
                      seg_bce_loss,
                      depth_bce_loss,
                      normals_bce_loss
                      )

  term_weight = 1 / len(terms)
  weighted_terms = [term.mean() * term_weight for term in terms.as_list()]

  loss = sum(weighted_terms)

  return NOD.dict_of(loss, terms)
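Note that NOD.dict_of infers the mapping keys from the names of the variables passed to it; Example No. 16 below builds the same return value with explicit keywords. A minimal sketch of the apparent equivalence, assuming warg's NOD behaves as in these two examples:

from warg import NOD

loss, terms = 0.5, (0.1, 0.4)
a = NOD.dict_of(loss, terms)  # keys inferred from the argument names
b = NOD(loss=loss, terms=terms)  # explicit keywords, as in Example No. 16
print(a, b)  # expected to hold the same {"loss": ..., "terms": ...} mapping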
Example No. 2
def convert_to_coco_api(ds):
  """

:param ds:
:type ds:
:return:
:rtype:
"""
  coco_ds = COCO()
  ann_id = 0
  dataset = {"images":[], "categories":[], "annotations":[]}
  categories = set()
  for img_idx in range(len(ds)):
    # find better way to get target
    # targets = ds.get_annotations(img_idx)
    img, targets = ds[img_idx]
    image_id = targets["image_id"].item()
    dataset["images"].append(
        {"id":image_id, "height":img.shape[-2], "width":img.shape[-1]}
        )
    bboxes = targets["boxes"]
    bboxes[:, 2:] -= bboxes[:, :2]
    bboxes = bboxes.tolist()
    labels = targets["labels"].tolist()
    areas = targets["area"].tolist()
    iscrowd = targets["iscrowd"].tolist()
    masks = None
    keypoints = None
    if "masks" in targets:
      masks = targets["masks"]
      # make masks Fortran contiguous for coco_mask
      masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
    if "keypoints" in targets:
      keypoints = targets["keypoints"]
      keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
    num_objs = len(bboxes)
    for i in range(num_objs):
      ann = NOD(
          image_id=image_id,
          bbox=bboxes[i],
          category_id=labels[i],
          area=areas[i],
          iscrowd=iscrowd[i],
          id=ann_id,
          segmentation=None,
          keypoints=None,
          num_keypoints=None,
          )
      categories.add(labels[i])
      if "masks" in targets:
        ann.segmentation = coco_mask.encode(masks[i].numpy())
      if "keypoints" in targets:
        ann.keypoints = keypoints[i]
        ann.num_keypoints = sum(k != 0 for k in keypoints[i][2::3])
      dataset["annotations"].append(ann)
      ann_id += 1
  dataset["categories"] = [{"id":i} for i in sorted(categories)]
  coco_ds.dataset = dataset
  coco_ds.createIndex()
  return coco_ds
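A sketch of how the returned COCO object can be consumed, here with pycocotools' COCOeval for box evaluation; the detections file name is a placeholder:

from pycocotools.cocoeval import COCOeval

coco_gt = convert_to_coco_api(ds)  # ds as above
coco_dt = coco_gt.loadRes("detections.json")  # placeholder results file
coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()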
Example No. 3
def session_factory(
    agent: Type[AgentType] = None,
    config=None,
    *,
    session: Union[Type[EnvironmentSessionType], EnvironmentSession],
    save: bool = True,
    has_x_server: bool = True,
    skip_confirmation: bool = True,
    **kwargs,
):
    r"""
Entry point start a starting a training session with the functionality of parsing cmdline arguments and
confirming configuration to use before training and overwriting of default training configurations
"""

    if config is None:
        config = {}

    if isinstance(config, dict):
        config = NOD(**config)
    else:
        config = NOD(config.__dict__)

    if has_x_server:
        display_env = getenv("DISPLAY", None)
        if display_env is None:
            config.RENDER_ENVIRONMENT = False
            has_x_server = False

    config_mapping = config_to_mapping(config)
    config_mapping.update(**kwargs)

    config_mapping.update(save=save, has_x_server=has_x_server)

    if not skip_confirmation:
        sprint(f"\nUsing config: {config}\n", highlight=True, color="yellow")
        for key, arg in config_mapping.items():
            print(f"{key} = {arg}")

        input("\nPress Enter to begin... ")

    if session is None:
        raise NoProcedure
    elif inspect.isclass(session):
        session = session(**config_mapping)  # Use passed config arguments
    elif isinstance(session, GDKC):
        session = session(
            **kwargs
        )  # Assume some kwargs were set prior to passing the session; only override with explicit overrides

    try:
        session(agent, **config_mapping)
    except KeyboardInterrupt:
        print("Stopping")

    torch.cuda.empty_cache()

    exit(0)
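A hypothetical invocation; the agent and session names below are placeholders, since session_factory only requires a session class, GDKC spec, or instance:

session_factory(
    agent=MyAgent,  # placeholder agent type
    config=dict(RENDER_ENVIRONMENT=True),
    session=MySession,  # placeholder EnvironmentSession subclass
    skip_confirmation=False,
)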
Example No. 4
def main(is_user: bool = False):
    """ """
    global LOG_WRITER
    if is_user:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.user_log)
            / f"{PROJECT_NAME}_publisher.log"
        )
    else:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.site_log)
            / f"{PROJECT_NAME}_publisher.log"
        )
    LOG_WRITER.open()
    client = mqtt.Client()
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect

    HEIMDALLR_SETTINGS = HeimdallrSettings()  # TODO input scope

    client.username_pw_set(
        HEIMDALLR_SETTINGS.mqtt_username, HEIMDALLR_SETTINGS.mqtt_password
    )
    try:
        client.connect(
            HEIMDALLR_SETTINGS.mqtt_broker, HEIMDALLR_SETTINGS.mqtt_port, keepalive=60
        )
    except ValueError as ve:
        raise ValueError(
            f"{HEIMDALLR_SETTINGS._mqtt_settings_path},"
            f"{HEIMDALLR_SETTINGS.mqtt_broker},"
            f"{HEIMDALLR_SETTINGS.mqtt_port},"
            f"{ve}"
        )
    client.loop_start()

    sensor_data = NOD({HOSTNAME: pull_gpu_info()})
    next_reading = time.time()

    with IgnoreInterruptSignal():
        print("Publisher started")

        for _ in busy_indicator():
            sensor_data[HOSTNAME] = pull_gpu_info()
            s = sensor_data.as_dict()
            s = json.dumps(s)
            client.publish(ALL_CONSTANTS.MQTT_TOPIC, s, ALL_CONSTANTS.MQTT_QOS)
            next_reading += ALL_CONSTANTS.MQTT_PUBLISH_INTERVAL_SEC
            sleep_time = next_reading - time.time()

            if sleep_time > 0:
                time.sleep(sleep_time)

    # noinspection PyUnreachableCode
    LOG_WRITER.close()
    client.loop_stop()
    client.disconnect()
Example No. 5
def sample_transitions():
    """Build a small fixed batch of (signals, terminals, values) transitions."""
    signals = numpy.array([[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]], numpy.float32)
    terminals = numpy.array([[0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], numpy.float32)
    values = numpy.array(
        [[-100, 10, 20, 30, 40, 50], [-150, 15, 25, 35, 45, 55]], numpy.float32
    )  # Future values

    return NOD({"signals": signals, "terminals": terminals, "values": values})
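Since NOD supports both attribute and key access (compare config.data_dir in Example No. 17 with info["count"] in Example No. 7), the sampled batch can be unpacked either way:

batch = sample_transitions()
assert (batch.signals == batch["signals"]).all()  # attribute and key access agree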
Example No. 6
    def asdijhsadasdad():
        """ """
        from warg import NOD

        a = NOD(a=[1, 2, 8], b=[4, 3, 99])
        print(f"ValueMapProduct{str(list(map_value_product(a.as_dict())))}")
        print(f"MapProduct{str(list(map_product(a.as_dict())))}")
        print(f"map_combinations{str(list(map_combinations(a.as_dict())))}")
        print(f"map_permutations{str(list(map_permutations(a.as_dict())))}")
        print(f"map_combinations_with_replacement{str(list(map_combinations_with_replacement(a.as_dict())))}")
Example No. 7
def pull_gpu_info(include_graphics_processes: bool = True) -> dict:
    """Get all information about all your graphics cards.

    Returns:
      dict: The returned result is a dict with 3 keys: count, driver_version and devices:
          count: Number of gpus found
          driver_version: The version of the system’s graphics driver
          devices: a list where every item is a namedtuple Device with 10 fields, for example id,
          name and fan_speed.
                   Note that the Process field is also a namedtuple, with 11 fields."""

    driver_version, devices = get_nv_info(include_graphics_processes)

    info = NOD()

    info["count"] = len(devices)
    info["driver_version"] = driver_version
    info["devices"] = devices
    return info.as_dict()
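The returned plain dict is JSON-serialisable, which is how the publisher examples (No. 4 and No. 8) ship it over MQTT; a minimal sketch:

import json

print(json.dumps(pull_gpu_info(), indent=2))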
Example No. 8
def main(setting_scope: SettingScopeEnum = SettingScopeEnum.user):
    """ """
    global LOG_WRITER
    if setting_scope == SettingScopeEnum.user:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.user_log) /
            f"{PROJECT_NAME}_publisher.log")
    else:
        LOG_WRITER = LogWriter(
            ensure_existence(PROJECT_APP_PATH.site_log) /
            f"{PROJECT_NAME}_publisher.log")
    LOG_WRITER.open()
    client = mqtt.Client()
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect

    HEIMDALLR_SETTINGS = HeimdallrSettings(setting_scope)

    client.username_pw_set(HEIMDALLR_SETTINGS.mqtt_username,
                           HEIMDALLR_SETTINGS.mqtt_password)
    try:
        client.connect(HEIMDALLR_SETTINGS.mqtt_broker,
                       HEIMDALLR_SETTINGS.mqtt_port,
                       keepalive=60)
    except ValueError as ve:
        raise ValueError(f"{HEIMDALLR_SETTINGS._mqtt_settings_path},"
                         f"{HEIMDALLR_SETTINGS.mqtt_broker},"
                         f"{HEIMDALLR_SETTINGS.mqtt_port},"
                         f"{ve}")

    client.loop_start()

    sensor_data = NOD({HOSTNAME: pull_gpu_info()})

    if True:  # with IgnoreInterruptSignal():
        print("Publisher started")

        def job():
            """ """
            sensor_data[HOSTNAME] = pull_gpu_info()
            s = sensor_data.as_dict()
            s = json.dumps(s)
            client.publish(ALL_CONSTANTS.MQTT_TOPIC, s, ALL_CONSTANTS.MQTT_QOS)

        schedule.every(ALL_CONSTANTS.MQTT_PUBLISH_INTERVAL_SEC).seconds.do(job)

        for _ in busy_indicator():
            schedule.run_pending()
            time.sleep(1)

    # noinspection PyUnreachableCode
    LOG_WRITER.close()
    client.loop_stop()
    client.disconnect()
Example No. 9
    def create_licenses(self):
        """Creates the "license" portion of the COCO json"""
        license_json = self._dataset_info["license"]

        return [
            NOD(
                url=license_json["url"],
                id=license_json["id"],
                name=license_json["name"],
            )
        ]
Example No. 10
    def create_info(self):
        """Creates the "info" piece of the COCO json"""
        info_json = self._dataset_info["info"]

        return NOD(
            description=info_json["description"],
            version=info_json["version"],
            url=info_json["url"],
            year=info_json["year"],
            contributor=info_json["contributor"],
            date_created=info_json["date_created"],
        )
Example No. 11
def main():
    from configs.mobilenet_v2_ssd320_voc0712 import base_cfg

    # from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
    # from configs.vgg_ssd300_voc0712 import base_cfg

    parser = argparse.ArgumentParser(
        description="Single Shot MultiBox Detector Training With PyTorch"
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--log_step", default=10, type=int, help="Print logs every log_step"
    )
    parser.add_argument(
        "--save_step", default=2500, type=int, help="Save checkpoint every save_step"
    )
    parser.add_argument(
        "--eval_step",
        default=2500,
        type=int,
        help="Evaluate dataset every eval_step, disabled when eval_step < 0",
    )
    parser.add_argument("--use_tensorboard", default=True, type=str2bool)
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    args.num_gpus = num_gpus

    set_benchmark_device_dist(args.distributed, args.local_rank)
    logger = setup_distributed_logger(
        "SSD",
        global_distribution_rank(),
        ensure_existence(PROJECT_APP_PATH.user_data / "results")
    )
    logger.info(f"Using {num_gpus} GPUs")
    logger.info(args)
    with TorchCacheSession():
        model = train_ssd(
            base_cfg.data_dir,
            base_cfg,
            base_cfg.solver,
            NOD(**args.__dict__)
        )

    if not args.skip_test:
        logger.info("Start evaluating...")
        do_ssd_evaluation(base_cfg, model, distributed=args.distributed)
Example No. 12
def create_coco_image(image_path, image_id, image_license):
    """Creates the "image" portion of COCO json"""
    # Open the image and get the size
    image_file = Image.open(image_path)
    width, height = image_file.size

    return NOD(
        license=image_license,
        file_name=image_path.name,
        width=width,
        height=height,
        id=image_id,
    )
Example No. 13
def get_terminal_size() -> NOD:
    """

    :return:
    :rtype:"""
    try:
        size = shutil.get_terminal_size()
        columns, rows = size.columns, size.lines
    except Exception:  # no controlling terminal; fall back to environment variables
        rows, columns = (os.getenv("LINES", 25), os.getenv("COLUMNS", 80))

    rows, columns = int(rows), int(columns)

    return NOD(rows=rows, columns=columns)
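Usage is then attribute-style:

size = get_terminal_size()
print(f"{size.rows} rows x {size.columns} columns")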
Example No. 14
    def asdijha() -> None:
        """
        :rtype: None
        """
        from warg import NOD

        a = NOD(a=[1], b=[4], c=[8])
        print(f"ValueMapProduct{str(list(map_value_product(a.as_dict())))}")
        print(f"MapProduct{str(list(map_product(a.as_dict())))}")
        print(f"map_combinations{str(list(map_combinations(a.as_dict())))}")
        print(f"map_permutations{str(list(map_permutations(a.as_dict())))}")
        print(
            f"map_combinations_with_replacement{str(list(map_combinations_with_replacement(a.as_dict())))}"
        )
Example No. 15
    def train(self, **explicit_overrides) -> None:
        """

@param explicit_overrides: Accepts kwarg overrides to config
@return:
"""
        default_config = NOD(AGENT_CONFIG[self.agent_key])

        config_overrides = upper_dict(explicit_overrides)
        for key, arg in config_overrides.items():
            setattr(default_config, key, arg)

        print("Explicit Overrides:")
        print(explicit_overrides)
        #print(default_config)

        self.agent_callable(config=default_config, **explicit_overrides)
Example No. 16
def calculate_loss(seg, recon, depth, normals):
    """

  :param seg:
  :type seg:
  :param recon:
  :type recon:
  :param depth:
  :type depth:
  :param normals:
  :type normals:
  :return:
  :rtype:
  """
    (
        (seg_pred, seg_target),
        (recon_pred, recon_target),
        (depth_pred, depth_target),
        (normals_pred, normals_target),
    ) = (seg, recon, depth, normals)

    seg_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(
        seg_pred, seg_target
    )
    ae_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(
        recon_pred, recon_target
    )
    normals_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(
        normals_pred, normals_target
    )
    depth_bce_loss = torch.nn.functional.binary_cross_entropy_with_logits(
        depth_pred, depth_target
    )

    pred_soft = torch.sigmoid(seg_pred)
    dice = dice_loss(pred_soft, seg_target, epsilon=1)
    jaccard = jaccard_loss(pred_soft, seg_target, epsilon=1)

    terms = (dice, jaccard, ae_bce_loss, seg_bce_loss, depth_bce_loss, normals_bce_loss)

    term_weight = 1 / len(terms)
    weighted_terms = [term.mean() * term_weight for term in terms]

    loss = sum(weighted_terms)

    return NOD(loss=loss, terms=terms)
Example No. 17
def get_ram_config():
    """Parse known args and assemble the RAM experiment configuration."""
    config, unparsed = parser.parse_known_args()

    ram_base = "ram"
    config = NOD(**config.__dict__)
    # config.data_dir = PROJECT_APP_PATH.user_data / ram_base / 'data'
    config.data_dir = Path.home() / "Data" / "vision_sample_data"
    config.ckpt_dir = PROJECT_APP_PATH.user_data / ram_base / "ckpt"
    config.logs_dir = PROJECT_APP_PATH.user_log / ram_base / "logs"
    config.plot_dir = PROJECT_APP_PATH.user_log / ram_base / "plots"

    return config  # , unparsed
Example No. 18
    def __call__(self, image: ndarray, target: Mapping[str, Any]) -> Tuple:
        w, h = image.size  # assumes a PIL image, despite the ndarray annotation

        image_id = torch.tensor([target["image_id"]])

        anno = [obj for obj in target["annotations"] if obj.iscrowd == 0]
        boxes = [obj.BoundingBox for obj in anno]

        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = torch.tensor([obj.category_id for obj in anno],
                               dtype=torch.int64)

        masks = convert_coco_poly_to_mask([obj.segmentation for obj in anno],
                                          h, w)

        keypoints = None
        if anno and anno[0].Keypoints is not None:
            keypoints = [obj.Keypoints for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                keypoints = keypoints.view(num_keypoints, -1, 3)

        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])

        target = NOD(
            boxes=boxes[keep],
            labels=classes[keep],
            masks=masks[keep],
            image_id=image_id,
            area=torch.tensor([obj.area for obj in anno]),
            iscrowd=torch.tensor([obj.iscrowd for obj in anno]),
            keypoints=None,
        )

        if keypoints is not None:
            target.keypoints = keypoints[keep]

        return image, target
Example No. 19
    def create_categories(self):
        """Creates the "categories" portion of the COCO json
        Returns:
        categories: category objects that become part of the final json
        category_ids_by_name: a lookup dictionary for category ids based
            on the name of the category"""

        categories = []
        category_ids_by_name = dict()
        category_id = 1  # 0 is reserved for the background

        super_categories = self._mask_definitions["super_categories"]
        for super_category, _categories in super_categories.items():
            for category_name in _categories:
                categories.append(
                    NOD(supercategory=super_category,
                        id=category_id,
                        name=category_name))
                category_ids_by_name[category_name] = category_id
                category_id += 1

        return categories, category_ids_by_name
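For illustration, with a hypothetical mask definition holding one super-category of two categories, the ids start at 1 because 0 is reserved for the background:

# Hypothetical self._mask_definitions:
#   {"super_categories": {"vehicle": ["car", "truck"]}}
# Expected result:
#   categories == [NOD(supercategory="vehicle", id=1, name="car"),
#                  NOD(supercategory="vehicle", id=2, name="truck")]
#   category_ids_by_name == {"car": 1, "truck": 2}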
Example No. 20
def main(**config):
    """

    Args:
      config:
    """

    config = NOD(**config)

    torch.manual_seed(config.random_seed)  # ensure reproducibility
    kwargs = {}
    if config.use_gpu:
        torch.cuda.manual_seed(config.random_seed)
        kwargs["num_workers"] = 1
        kwargs["pin_memory"] = True

    if config.is_train:  # instantiate data loaders
        data_loader = MNISTDataset.get_train_valid_loader(
            config.data_dir,
            batch_size=config.batch_size,
            random_seed=config.random_seed,
            valid_size=config.valid_size,
            shuffle=config.shuffle,
            **kwargs,
        )
    else:
        data_loader = MNISTDataset.get_test_loader(config.data_dir,
                                                   config.batch_size, **kwargs)

    trainer = Trainer(data_loader, **config)

    with PytorchTensorboardWriter() as writer:
        if config.is_train:
            trainer.train(writer=writer)
        else:  # or load a pretrained model and test
            trainer.test(writer=writer)
Example No. 21
def test_model(model, data_iterator, latest_model_path, num_columns: int = 2):
    """Evaluate the model on a single batch and emit an HTML/PDF classification report."""
    model = model.eval().to(global_torch_device())

    inputs, labels = next(data_iterator)

    inputs = inputs.to(global_torch_device())
    labels = labels.to(global_torch_device())
    with torch.no_grad():
        pred = model(inputs)

    y_pred = pred.data.to("cpu").numpy()
    y_pred_max = numpy.argmax(y_pred, axis=-1)
    truth_labels = labels.data.to("cpu").numpy()  # sklearn metrics expect cpu numpy arrays
    accuracy_w = accuracy_score(truth_labels, y_pred_max)
    precision_a, recall_a, fscore_a, support_a = precision_recall_fscore_support(
        truth_labels, y_pred_max)
    precision_w, recall_w, fscore_w, support_w = precision_recall_fscore_support(
        truth_labels, y_pred_max, average="weighted")

    _, predicted = torch.max(pred, 1)

    input_images_rgb = [
        default_torch_retransform(x) for x in inputs.to(global_torch_device())
    ]

    cell_width = (800 / num_columns) - 6 - 6 * 2

    pyplot.plot(numpy.random.random((3, 3)))

    alphabet = string.ascii_lowercase
    class_names = numpy.array([*alphabet])

    samples = len(y_pred)
    predictions = [[None for _ in range(num_columns)]
                   for _ in range(-(-samples // num_columns))]  # ceil, in case of a ragged last row
    for i, a, b, c in zip(range(samples), input_images_rgb, y_pred_max,
                          truth_labels):
        pyplot.imshow(a)
        if b == c:
            outcome = "tp"
        else:
            outcome = "fn"

        gd = ReportEntry(
            name=i,
            figure=plt_html(a, format="jpg", size=(cell_width, cell_width)),
            prediction=class_names[b],
            truth=class_names[c],
            outcome=outcome,
            explanation=None,
        )

        predictions[i // num_columns][i % num_columns] = gd

    cfmat = confusion_matrix_plot(y_pred_max, truth_labels, class_names)

    title = "Classification Report"
    model_name = latest_model_path
    confusion_matrix = plt_html(cfmat, format="png", size=(800, 800))

    accuracy = generate_math_html(r"\dfrac{tp+tn}{N}"), None, accuracy_w
    precision = generate_math_html(
        r"\dfrac{tp}{tp+fp}"), precision_a, precision_w
    recall = generate_math_html(r"\dfrac{tp}{tp+fn}"), recall_a, recall_w
    f1_score = (
        generate_math_html(r"2*\dfrac{precision*recall}{precision+recall}"),
        fscore_a,
        fscore_w,
    )
    support = generate_math_html(r"N_{class\_truth}"), support_a, support_w
    metrics = NOD.nod_of(accuracy, precision, f1_score, recall,
                         support).as_flat_tuples()

    bundle = NOD.nod_of(title, model_name, confusion_matrix, metrics,
                        predictions)

    file_name = Path(title.lower().replace(" ", "_"))

    generate_html(file_name.with_suffix(".html"), **bundle)
    generate_pdf(file_name.with_suffix(".html"), file_name.with_suffix(".pdf"))
Example No. 22
from enum import Enum

from draugr.torch_utilities.tensors.tensor_container import NamedTensorTuple
from draugr.torch_utilities import Split
from warg import NOD


class CocoModeEnum(Enum):
  instances = "instances"
  person_keypoints = "person_keypoints"


CocoPolyAnnotation = NOD({
    "image_id":     None,
    "bbox":         None,
    "category_id":  None,
    "area":         None,
    "iscrowd":      None,
    "id":           None,
    "segmentation": None,
    "keypoints":    None,
    "num_keypoints":None
    })

CocoMask = NOD({"boxes":    None,
                "labels":   None,
                "masks":    None,
                "image_id": None,
                "area":     None,
                "iscrowd":  None,
                "keypoints":None
                })
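These module-level NODs read as field templates; a sketch of filling a fresh copy, assuming as_dict() returns a new mapping so the template itself is not mutated:

ann = NOD(CocoPolyAnnotation.as_dict())  # fresh annotation from the template
ann.image_id = 0
ann.category_id = 1
ann.iscrowd = 0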
Example No. 23
from pathlib import Path

from warg import NOD

MQTT_CAM_CONFIG = NOD(
    mqtt=NOD(
        broker="localhost", port=1883, QOS=1
    ),  # or an ip address like 192.168.1.74
    camera=NOD(
        video_source=0,
        fps=30,  # 2
        mqtt_topic="video/video0/capture",
        # If your desired camera is listed as source 0 you will configure video_source: 0. Alternatively
        # you can configure the video source as an MJPEG or RTSP stream. For example in config.yml you may
        # configure something like video_source: "rtsp://*****:*****@192.168.1.94:554/11" for a RTSP
        # camera.
    ),
    processing=NOD(
        subscribe_topic="video/video0/capture",
        publish_topic="video/video0/capture/rotated",
    ),
    save_captures=NOD(
        mqtt_topic="video/video0/capture", captures_directory=Path("captures")
    ),
)
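Nested NODs give dotted access to the whole configuration tree:

print(MQTT_CAM_CONFIG.mqtt.broker)  # -> "localhost"
print(MQTT_CAM_CONFIG.camera.mqtt_topic)  # -> "video/video0/capture"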
Example No. 24
    if not TRAIN_DIR.exists():
        TRAIN_DIR.mkdir(parents=True)

    DATA_DIR = PROJECT_APP_PATH.user_data / "vanilla_vae" / "data"

    if not DATA_DIR.exists():
        DATA_DIR.mkdir(parents=True)

    cfg = NOD(
        latent_size=128,
        variational="flow",
        flow_depth=2,
        data_size=784,
        learning_rate=0.001,
        batch_size=128,
        test_batch_size=512,
        max_iterations=100000,
        log_interval=10000,
        early_stopping_interval=5,
        n_samples=128,
        use_gpu=True,
        train_dir=TRAIN_DIR,
        data_dir=DATA_DIR,
        seed=42,
    )

    device = torch.device("cuda" if cfg.use_gpu else "cpu")

    torch.manual_seed(cfg.seed)
    numpy.random.seed(cfg.seed)
    random.seed(cfg.seed)
Example No. 25
    def __crystallise__(self) -> NOD:
        return NOD({k: getattr(self, k) for k in self})
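A sketch of the intent with a hypothetical host class whose iteration yields its field names:

class Mixin:
    """Hypothetical host; iterating it yields attribute names."""

    a, b = 1, 2

    def __iter__(self):
        return iter(("a", "b"))

    def __crystallise__(self) -> NOD:
        return NOD({k: getattr(self, k) for k in self})

assert Mixin().__crystallise__().as_dict() == {"a": 1, "b": 2}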
Example No. 26
def get_nv_info(include_graphics_processes: bool = True):
    """Query the driver version and per-device info via the NVML bindings."""
    devices = []
    try:
        driver_version = bindings.nvmlSystemGetDriverVersion().decode()
        device_count = bindings.nvmlDeviceGetCount()

        for device_i in range(device_count):
            handle = bindings.nvmlDeviceGetHandleByIndex(device_i)
            device_name = bindings.nvmlDeviceGetName(handle).decode()
            gpu_mem_info = bindings.nvmlDeviceGetMemoryInfo(handle)

            gpu_processes = bindings.nvmlDeviceGetComputeRunningProcesses(
                handle)
            if include_graphics_processes:
                gpu_processes = (
                    gpu_processes +
                    bindings.nvmlDeviceGetGraphicsRunningProcesses(handle))

            processes_info = []

            for p in gpu_processes:
                pid = p.pid
                used_gpu_mem = p.usedGpuMemory
                p = psutil.Process(pid=pid)
                _ = p.cpu_percent()
                time.sleep(
                    0.1
                )  # Recommended to pre-probe cpu_percent and sleep for at least 0.1 seconds.
                processes_info.append(
                    NOD(
                        used_gpu_mem=used_gpu_mem,
                        device_idx=device_i,
                        name=p.name(),
                        username=p.username(),
                        memory_percent=p.memory_percent(),
                        cpu_percent=p.cpu_percent(),
                        cmdline=" ".join(p.cmdline()),
                        device_name=device_name,
                        create_time=p.create_time(),
                        status=p.status(),
                        pid=pid,
                    ).as_dict())
            """
try:
  fan_speed = pynvml.nvmlDeviceGetFanSpeed(handle)
  power_usage = pynvml.nvmlDeviceGetPowerUsage(handle)  # milliwatts mW
except pynvml.NVMLError_NotSupported as e:
  fan_speed = None
  power_usage = None

power_state = pynvml.nvmlDeviceGetPowerState(handle)
temperature = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
"""

            devices.append(
                NOD(
                    id=device_i,
                    name=device_name,
                    free=gpu_mem_info.free,
                    used=gpu_mem_info.used,
                    total=gpu_mem_info.total,
                    processes=processes_info,
                ).as_dict())
    except Exception as e:
        print(e)
        driver_version = "No nvidia driver"

    return driver_version, devices
Example No. 27
from neodroidagent.common import CategoricalMLP
from neodroidagent.configs.base_config import *
from warg import NOD

CONFIG_NAME = __name__
import pathlib

CONFIG_FILE_PATH = pathlib.Path(__file__)

# Architecture
POLICY_ARCH_SPEC = GDKC(
    CategoricalMLP,
    NOD(
        input_shape=None,  # Obtain from environment
        hidden_layers=(32, 32),  # Estimate from input and output size
        output_shape=None,  # Obtain from environment
        hidden_layer_activation=torch.relu,
        use_bias=True,
    ),
)
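# Judging from Example No. 3 (isinstance(session, GDKC) followed by session(**kwargs)),
# GDKC appears to defer construction: it stores the constructor and default kwargs,
# and calling the spec later instantiates the object, e.g. (hypothetical shapes):
# policy = POLICY_ARCH_SPEC(input_shape=(4,), output_shape=(2,))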

ROLLOUTS = 10000

ENVIRONMENT_NAME = "CartPole-v1"
"""

Description
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum
starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's
velocity.
    res = []
    for kpts in detections:
        d = {n: k.round().astype(int).tolist() for (n, k) in zip(names, kpts)}
        res.append(d)
    return res


def grab_video_frame(cap):
    ret, frame = cap.read()  # note: ret is not checked; assumes the read succeeded
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)


frame_i = 0
time_s = time.time()

image_axs = NOD()

env = VectorWrapper(UnityEnvironment(connect_to_running=True))
fig = pyplot.figure()
print_obs = False


def update_figures(i):
    global time_s, frame_i, image_axs

    sample = env.action_space.sample()
    obs, signal, terminated, info = env.react(sample).to_gym_like_output()
    if print_obs:
        print(i)
        for obs in info.sensors.values():
            print(obs)
Example No. 29
    "DECORATIONS",
    "generate_style",
    "sprint",
    "PrintStyle",
    "scale",
    "get_terminal_size",
    "hyperlink_path",
    "hyperlink_url",
]

COLORS = NOD(
    red="31",
    green="32",
    yellow="33",
    # gray='30', #Black,
    blue="34",
    magenta="35",
    cyan="36",
    white="37",
    crimson="38",
)

DECORATIONS = NOD(
    end="0",
    bold="1",
    dim="2",
    italic="3",
    underline="4",
    underline_end="24",  # '4:0',
    double_underline="21",  # '4:2'
    # double_underline_end='24',  # '4:0'
Example No. 30
SAN_CONFIG = NOD(
    dataset_type=ImageNet2012,
    dataset_path=Path.home() / "Data" / "Datasets" / "ILSVRC2012",
    arch="san",
    self_attention_type=0,
    layers=[2, 1, 2, 4, 1],
    kernels=[3, 7, 7, 7, 7],
    ignore_label=2000,
    base_lr=0.1,
    epochs=100,
    start_epoch=0,
    step_epochs=[30, 60, 90],
    label_smoothing=0.1,
    scheduler="cosine",
    momentum=0.9,
    weight_decay=0.0001,
    manual_seed=None,
    print_freq=10,
    save_freq=1,
    train_gpu=[0, 1, 2, 3, 4, 5, 6, 7],
    workers=32,  # data loader workers
    batch_size=256,  # batch size for training
    batch_size_val=128,  # batch size for validation during training, memory and speed tradeoff
    batch_size_test=10,  # 100,
    evaluate=True,
    # evaluate on the validation set; extra gpu memory is needed and a small batch_size_val is recommended
    dist_url="tcp://127.0.0.1:6789",
    dist_backend="nccl",
    multiprocessing_distributed=True,
    world_size=1,
    rank=0,
    test_gpu=[0],
    test_workers=10,
    mixup_alpha=None,  #
    model_path=None,  #
    save_path=None,  #
    weight=None,  # path to initial weight (default=none)
    resume=None,  # path to latest checkpoint (default=none)
)