Example #1
def test_retrieve_fail_ef():
    dol = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    with pytest.raises(Exception):
        val = retrieve(dol, "f", expand=False)

    with pytest.raises(Exception):
        val = retrieve(dol, "a/4", expand=False)

    with pytest.raises(Exception):
        val = retrieve(dol, "b/c/e", expand=False)
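
The failure modes exercised above (missing key, out-of-range list index, descending past a leaf) can be summarized with a minimal sketch of the semantics these tests assume. This is an illustration only, not edflow's actual implementation: keys are "/"-separated paths, list entries are addressed by integer index, `default` is returned on any lookup failure, and with expand=False a callable node cannot be descended into.

_SENTINEL = object()

def retrieve_sketch(collection, key, splitval="/", default=_SENTINEL, expand=True):
    """Approximation of retrieve() for illustration; not edflow's code."""
    node = collection
    for part in key.split(splitval):
        if callable(node):
            if not expand:
                # cannot descend into a callable without expanding it
                if default is not _SENTINEL:
                    return default
                raise ValueError("Cannot get past callable node with expand=False.")
            node = node()  # expand the callable and keep descending
        try:
            if isinstance(node, (list, tuple)):
                node = node[int(part)]
            else:
                node = node[part]
        except (KeyError, IndexError, TypeError, ValueError):
            if default is not _SENTINEL:
                return default
            raise
    return node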
Example #2
def test_retrieve_fail():
    dol = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    with pytest.raises(Exception):
        val = retrieve(dol, "f")

    with pytest.raises(Exception):
        val = retrieve(dol, "a/4")

    with pytest.raises(Exception):
        val = retrieve(dol, "b/c/e")
Example #3
def test_retrieve_default_ef():
    dol = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    ref = "abc"

    val = retrieve(dol, "f", default="abc", expand=False)
    assert val == ref

    val = retrieve(dol, "a/4", default="abc", expand=False)
    assert val == ref

    val = retrieve(dol, "b/c/e", default="abc", expand=False)
    assert val == ref
Example #4
    def __init__(self, config):
        super().__init__()
        self.config = config
        in_channels = retrieve(config, "Transformer/in_channels")
        mid_channels = retrieve(config, "Transformer/mid_channels")
        hidden_depth = retrieve(config, "Transformer/hidden_depth")
        n_flows = retrieve(config, "Transformer/n_flows")
        K = retrieve(config, "Transformer/K", default=5)

        self.in_channels = in_channels

        self.flow = NSF_AR(dim=self.in_channels, hidden_dim=mid_channels, K=K)
Example #5
def test_retrieve_default_ef_callable():
    dol = {"a": [1, 2], "b": callable_leave, "e": 2}
    ref = "abc"

    val = retrieve(dol, "f", default="abc", expand=False)
    assert val == ref

    val = retrieve(dol, "a/4", default="abc", expand=False)
    assert val == ref

    val = retrieve(dol, "b/c/e", default="abc", expand=False)
    assert val == ref
Example #6
    def __init__(self, config):
        folder = retrieve(config, "Folder/folder")
        size = retrieve(config, "Folder/size", default=0)
        random_crop = retrieve(config, "Folder/random_crop", default=False)

        relpaths = sorted(os.listdir(folder))
        abspaths = [os.path.join(folder, p) for p in relpaths]
        labels = {"relpaths": relpaths}

        self.data = ImagePaths(paths=abspaths,
                               labels=labels,
                               size=size,
                               random_crop=random_crop)
Example #7
def test_retrieve_ef_callable():
    dol = {"a": [1, 2], "b": callable_leave, "e": 2}

    val = retrieve(dol, "a", expand=False)
    ref = [1, 2]
    assert val == ref

    val = retrieve(dol, "a/0", expand=False)
    ref = 1
    assert val == ref

    with pytest.raises(Exception):
        val = retrieve(dol, "b/c/d", expand=False)
Example #8
    def __init__(self, config, root, model, datasets, **kwargs):
        super().__init__(config, root, model, datasets, **kwargs)
        self.model = model

        triplet_margin = config.get("triplet_margin", 0.3)
        self.triplet_loss = TripletLoss(triplet_margin)
        self.bce_loss = torch.nn.BCELoss()
        self.bs = config["batch_size"]

        self.lr = retrieve(config, "optimizer/lr", default=1e-4)
        beta_1 = retrieve(config, "optimizer/beta_1", default=0.5)
        beta_2 = retrieve(config, "optimizer/beta_2", default=0.99)

        self.optimizer = torch.optim.Adam(model.parameters(), lr=self.lr, betas=(beta_1, beta_2))
Example #9
    def __init__(self, config):
        super().__init__()
        import torch.backends.cudnn as cudnn
        cudnn.benchmark = True
        self.config = config
        self.n_split = retrieve(config, "Transformer/n_split")
        in_channel = retrieve(config, "Transformer/in_channels")
        n_flow = retrieve(config, "Transformer/n_flow_sub")
        depth_submodules = retrieve(config, "Transformer/hidden_depth")
        hidden_dim_mulitplier = retrieve(config,
                                         "Transformer/hidden_dim_multiplier")
        embedding_dim = retrieve(config, "Transformer/embedding_dim")
        self.n_classes = retrieve(config, "Transformer/num_classes")
        conditioning_option = retrieve(config,
                                       "Transformer/conditioning_option",
                                       default='none')

        self.flow = ConditionalFlatSplitFlow(
            n_scale=self.n_split,
            dim=in_channel,
            n_flow_sub=n_flow,
            submodule_depth=depth_submodules,
            hidden_dim_mulitplier=hidden_dim_mulitplier,
            embedding_dim=embedding_dim,
            conditioning_option=conditioning_option)
        self.embedder = nn.Linear(self.n_classes, embedding_dim, bias=False)
Example #10
def test_retrieve():
    dol = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    val = retrieve(dol, "a")
    ref = [1, 2]
    assert val == ref

    val = retrieve(dol, "a/0")
    ref = 1
    assert val == ref

    val = retrieve(dol, "b/c/d")
    ref = 1
    assert val == ref
Example #11
    def __init__(self, config, root, model, datasets, **kwargs):
        self.model = model

        self.optim = torch.optim.Adam(self.model.parameters(),
                                      lr=0.0001)  #, momentum=0.9)

        self.n_start = retrieve(config, 'model_pars/start_size')
        self.t_offset = retrieve(config, 'model_pars/prediction_offset')
        self.behavior_size = retrieve(config, 'model_pars/behavior_size')

        self.train_stage_1 = retrieve(config, 'training/stage_1')
        self.train_stage_2 = retrieve(config, 'training/stage_2')

        super().__init__(config, root, model, datasets, **kwargs)
Example #12
def test_retrieve_ef():
    dol = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    val = retrieve(dol, "a", expand=False)
    ref = [1, 2]
    assert val == ref

    val = retrieve(dol, "a/0", expand=False)
    ref = 1
    assert val == ref

    val = retrieve(dol, "b/c/d", expand=False)
    ref = 1
    assert val == ref
Example #13
    def __init__(self, config):
        super().__init__()
        possible_resnets = {
            'resnet18': models.resnet18,
            'resnet34': models.resnet34,
            'resnet50': models.resnet50,
            'resnet101': models.resnet101,
            'resnet50stylized': models.resnet50,
        }
        from torch.utils import model_zoo
        self.logger = get_logger(self.__class__.__name__)
        self.n_out = retrieve(config, "Model/n_classes")
        self.type = retrieve(config, "Model/type", default='resnet50')
        custom_head = retrieve(config, "Model/custom_head", default=True)
        self.model = possible_resnets[self.type](pretrained=retrieve(
            config, "Model/imagenet_pretrained", default=True))

        if custom_head:
            self.model.fc = nn.Linear(self.model.fc.in_features, self.n_out)

        if self.type in ["resnet50stylized"]:
            self.logger.info(
                "Loading pretrained Resnet-50 trained on stylized ImageNet")
            which_stylized = retrieve(config,
                                      "Model/whichstyle",
                                      default="resnet50_trained_on_SIN")

            self.logger.info("Loading {} from url {}".format(
                which_stylized, STYLE_MODEL_URLS[which_stylized]))
            assert not custom_head
            url = STYLE_MODEL_URLS[which_stylized]
            state = model_zoo.load_url(url)
            # remove the .module in keys of state dict (from DataParallel)
            state_unboxed = dict()
            for k in tqdm(state["state_dict"].keys(), desc="StateDict"):
                state_unboxed[k[7:]] = state["state_dict"][k]
            self.model.load_state_dict(state_unboxed)
            self.logger.info(
                "Loaded resnet50 trained on stylized ImageNet, version {}".
                format(which_stylized))

        normalize = torchvision.transforms.Normalize(mean=self.mean,
                                                     std=self.std)
        self.image_transform = torchvision.transforms.Compose([
            torchvision.transforms.Lambda(lambda image: F.interpolate(
                image, size=(224, 224), mode="bilinear")),
            torchvision.transforms.Lambda(lambda image: torch.stack(
                [normalize(rescale(x)) for x in image]))
        ])
Example #14
    def __init__(self, config):
        super().__init__()

        num_layers = retrieve(config, 'model_pars/behavior/num_layers')
        input_size = retrieve(config, 'model_pars/behavior/input_size')
        behavior_size = retrieve(config, 'model_pars/behavior_size')

        self.num_layers = num_layers
        self.behavior_size = behavior_size

        self.rnn = nn.GRU(input_size,
                          behavior_size,
                          num_layers=num_layers,
                          bias=True,
                          batch_first=True)
Example #15
    def _load(self):
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print("Removed {} files from filelist during filtering.".format(
                l1 - len(self.relpaths)))
            print("Size of dataset is now {} files.".format(len(
                self.relpaths)))

        if "sub_indices" in self.config:
            self._build_superclasses()

        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]

        unique_synsets = np.unique(self.synsets)
        class_dict = dict(
            (synset, i) for i, synset in enumerate(unique_synsets))
        self.class_labels = [class_dict[s] for s in self.synsets]

        self.superclasses = [
            self.synset_classdict[s] if "sub_indices" in self.config else -1
            for s in self.synsets
        ]
        if retrieve(self.config, "use_superclasses", default=False):
            assert "sub_indices" in self.config
            print("Using {} different superclasses of ImageNet.".format(
                len(np.unique(np.array(self.superclasses)))))
            self.class_labels = self.superclasses

        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)

        self.human_labels = [human_dict[s] for s in self.synsets]

        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
            "superclass": np.array(self.superclasses),
        }
        self.data = ImagePaths(self.abspaths,
                               labels=labels,
                               size=retrieve(self.config, "size", default=0),
                               random_crop=self.random_crop)
Example #16
    def __call__(
        self,
        display_selection,
        example_idx,
    ):
        '''Makes sure that the slider changes the displayed example as given
        through :attr:`example_idx` and that the visualization is done using
        the selected renderer :attr:`display_selection`.
        '''

        print('Called {} Callback with {}'.format(self.name, [
            display_selection,
            example_idx,
        ]))

        # Decide what and how to show
        obj = retrieve(self.dset[example_idx], self.name)

        render, info = RENDERERS[display_selection](obj, self.name)
        info_element = dict2table(info)

        example_body = [
            html.Div(render, className='six columns'),
            html.Div(info_element, className='six columns')
        ]
        return example_body,
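
For context, such a callable is typically wired up with Dash roughly as follows. This is a sketch with hypothetical component ids (`display-selection`, `example-slider`, `example-body`); the actual registration lives wherever the layout is built. The list-of-Outputs form matches the tuple return above.

from dash.dependencies import Input, Output

# hypothetical ids; the real ones are defined where the app layout is built
app.callback(
    [Output("example-body", "children")],
    [Input("display-selection", "value"),
     Input("example-slider", "value")],
)(callback_instance)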
Example #17
    def __init__(self, config):
        super().__init__()
        import torch.backends.cudnn as cudnn
        cudnn.benchmark = True
        self.config = config

        self.in_channel = retrieve(config, "Transformer/in_channel")
        self.n_flow = retrieve(config, "Transformer/n_flow")
        self.depth_submodules = retrieve(config, "Transformer/hidden_depth")
        self.hidden_dim = retrieve(config, "Transformer/hidden_dim")
        modules = [VectorActNorm, DoubleVectorCouplingBlock, Shuffle]
        self.realnvp = EfficientVRNVP(modules,
                                      self.in_channel,
                                      self.n_flow,
                                      self.hidden_dim,
                                      hidden_depth=self.depth_submodules)
Example #18
    def __init__(self, config):
        self.path = "./data/sprites/"
        self.train_split = self.use_train_split()
        assert self.train_split in [True, False]
        self.size = retrieve(config, "spatial_size", default=64)
        self.raw_data = RawSprites(self.path)

        # note that we only take even character indices. odd character indices
        # differ only in the weapon from the previous even character index.
        # since not all actions show the weapon, this leads to ambiguities.
        self.indices = [
            i for i in range(len(self.raw_data))
            if self.raw_data.load_key("train", i) == self.train_split
            and self.raw_data.load_key("character_idx", i) % 2 == 0
        ]
        self._length = len(self.indices)
        self.labels = {
            "identity":
            np.array([
                self.raw_data.load_key("character_idx", i)
                for i in self.indices
            ]),
            "frame_idx":
            np.array(
                [self.raw_data.load_key("frame_idx", i) for i in self.indices])
        }
Example #19
def _deep_lod2dol_v3(list_of_nested_things):
    """Turns a list of nested dictionaries into a nested dictionary of lists.
    This function takes care that all leafs of the nested dictionaries are 
    considered as full keys, not only the top level keys.

    .. Note::

        The difference to :func:`deep_lod2dol` is, that the correct type is
        never checked.

    Parameters
    ----------
    list_of_nested_things : list(dict(anything))
        A list of deep dictionaries

    Returns
    -------
    out : dict(anything(list))
        A dict containing lists of leaf entries.
    """

    leaf_keypaths = get_leaf_names(list_of_nested_things[0])

    out = {}
    for key in leaf_keypaths:
        stacked_entry = np.stack(
            [retrieve(d, key) for d in list_of_nested_things])
        set_value(out, key, stacked_entry)

    return out
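
A hypothetical usage sketch, assuming `get_leaf_names` yields "/"-joined key paths (here "pose/xy" and "label") and `set_value` writes a value back along such a path:

import numpy as np

# two nested records with identical structure
records = [
    {"pose": {"xy": np.array([0.0, 1.0])}, "label": 0},
    {"pose": {"xy": np.array([2.0, 3.0])}, "label": 1},
]
out = _deep_lod2dol_v3(records)
# expected: out["pose"]["xy"] has shape (2, 2) and out["label"] == array([0, 1])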
Example #20
    def __init__(self, config):
        super().__init__()
        z_dim = self.z_dim = retrieve(config, "Model/z_dim")
        image_size = retrieve(config, 'Model/in_size', default=128)
        use_actnorm = retrieve(config, 'Model/use_actnorm_in_dec', default=False)
        pretrained = retrieve(config, 'Model/pretrained', default=True)
        class_embedding_dim = 1000
        self.extra_z_dims = retrieve(config, "Model/extra_z_dims", default=list())

        self.map_to_class_embedding = ClassUp(z_dim, depth=2, hidden_dim=2*class_embedding_dim,
                                              use_sigmoid=False, out_dim=class_embedding_dim)
        self.decoder = load_variable_latsize_generator(image_size, z_dim,
                                                       pretrained=pretrained,
                                                       use_actnorm=use_actnorm,
                                                       n_class=class_embedding_dim,
                                                       extra_z_dims=self.extra_z_dims)
Example #21
    def _load(self):
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()

        assert len(self.relpaths) == self.expected_length

        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]

        unique_synsets = np.unique(self.synsets)
        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
        self.class_labels = [class_dict[s] for s in self.synsets]

        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)

        self.human_labels = [human_dict[s] for s in self.synsets]

        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }
        self.data = ImagePaths(self.abspaths,
                               labels=labels,
                               size=retrieve(self.config, "size", default=0),
                               random_crop=self.random_crop)
Example #22
    def after_step(self, step, results):
        for key in ["step_ops/emb"]:
            result = retrieve(key, results)
            if key in self.data:
                self.data[key] = np.concatenate([self.data[key], result])
            else:
                self.data[key] = result
Example #23
    def __init__(self, config):
        super().__init__()

        self.enc = BehaviorEncoder(config)
        self.dec = BehaviorDecoder(config)

        self.start_size = retrieve(config, 'model_pars/start_size')
Example #24
    def _load(self):
        with open(self.get_txt_filelist(), "r") as f:
            self.relpaths = f.read().splitlines()

        assert len(self.relpaths) == self.expected_length

        self.synsets = [p.split("/")[2] for p in self.relpaths]
        self.abspaths = [os.path.join(self.root, p) for p in self.relpaths]

        with open(os.path.join(self.root, "classes.txt"), "r") as f:
            unique_synsets = f.read().splitlines()
            assert len(unique_synsets) == 50

        class_dict = dict(
            (synset, i) for i, synset in enumerate(unique_synsets))
        self.class_labels = [class_dict[s] for s in self.synsets]

        self.human_labels = self.synsets

        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }
        self.data = ImagePaths(self.abspaths,
                               labels=labels,
                               size=retrieve(self.config, "size", default=0),
                               random_crop=self.random_crop)
Example #25
    def __init__(self, config):
        self.data = self.get_base_data(config)
        self.size = retrieve(config, "spatial_size", default=32)

        self.rescaler = albumentations.SmallestMaxSize(max_size = self.size)
        self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)
        self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])
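
For reference, albumentations transforms are called with keyword arguments and return a dict; a minimal, self-contained sketch of applying such a preprocessor to a dummy HWC uint8 image:

import numpy as np
import albumentations

size = 32
preprocessor = albumentations.Compose([
    albumentations.SmallestMaxSize(max_size=size),        # shorter side -> 32
    albumentations.CenterCrop(height=size, width=size),   # then crop to 32x32
])

img = np.random.randint(0, 256, size=(64, 80, 3), dtype=np.uint8)  # dummy image
out = preprocessor(image=img)["image"]
assert out.shape == (32, 32, 3)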
Example #26
    def train_op():
        before = time.time()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        if retrieve(self.config, "debug_timing", default=False):
            self.logger.info("train step needed {} s".format(time.time() - before))
Example #27
    def after_epoch(self, epoch):
        """Save csv for reuse and then start the evaluation callbacks

        Parameters
        ----------
        epoch :


        Returns
        -------

        """
        self.save_csv()

        data_out = EvalDataFolder(self.root)

        cb_kwargs = retrieve(self.config,
                             "eval_pipeline/callback_kwargs",
                             default={})

        for n, cb in self.cbacks.items():
            cb_name = "CB: {}".format(n)
            cb_name = "{a}\n{c}\n{a}".format(a="=" * len(cb_name), c=cb_name)
            self.logger.info(cb_name)

            kwargs = cb_kwargs.get(n, {})
            cb(self.root, self.data_in, data_out, self.config, **kwargs)
Example #28
    def after_step(self, step, results):
        embeddings = retrieve("step_ops/emb", results)
        for i, path in enumerate(self.paths):
            extracted = embeddings[i]
            out_path = path + "_alpha.npy"
            out_path = os.path.join(self.root, out_path)
            os.makedirs(os.path.split(out_path)[0], exist_ok=True)
            np.save(out_path, extracted)
Example #29
    def prepare_inputs_inplace(self, inputs):
        '''Casts all input to torch Tensor and pushes them to the gpu.'''
        before = time.time()

        inputs = walk(inputs, np2pt, inplace=True)

        if retrieve(self.config, "debug_timing", default=False):
            self.logger.info("prepare of data needed {} s".format(time.time() - before))
Example #30
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        greybox_config = self.config["GreyboxModel"]
        self.init_greybox(greybox_config)
        ae_config = self.config["AutoencoderModel"]
        self.init_ae(ae_config)
        self.log_n_samples = retrieve(self.config, "n_samples_logging",
                                      default=3)  # visualize n samples per representation