Example #1
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.time_frame = None
        self.time_frame_counter = 0
        self.travers_order = None
        self.init_explicit_modalities()

        self.graph = self.init_graph()
        self.graph_travers_order = self.get_graph_traverse_order()

        self.models = {}
        self.init_models_and_adjust_sizes()

        self.init_remaining_modalities()

        self.losses = {}
        self.init_losses()

        # Save mermaidjs description of graph to log folder
        os.makedirs(Global_Cfgs().log_folder, exist_ok=True)
        fn = 'mermaidjs_{ds_name}_{graph_name}_{exp_name}_{scene_name}.txt'\
            .format(ds_name=self.get_cfgs('dataset_name'),
                    graph_name=self.get_name(),
                    exp_name=self.experiment_name,
                    scene_name=self.scene_cfgs['name'])
        mermaid_fn = os.path.join(Global_Cfgs().log_folder, fn)
        with open(mermaid_fn, 'w') as mermaid_file:
            mermaid_file.write(self.convert_to_mermaidjs())
        Console_UI().inform_user(f'Wrote mermaidjs config to {mermaid_fn}')

        self.exception_counter = 0
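The file written above holds a mermaid graph definition that can be pasted into any mermaid renderer. What convert_to_mermaidjs emits is project-specific; a generic example of the format it targets (node names here are purely illustrative):

graph TD
    input_image --> encoder
    encoder --> latent
    latent --> classifier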
Example #2
def main():
    cfgs = Global_Cfgs()
    scenario = Scenario(scenario_name=cfgs.get('scenario'))

    start_scene = cfgs.get('start_scene')
    try:
        for scene in scenario:
            if start_scene is None or \
                    scene.scene_name.strip().lower() == start_scene.strip().lower():
                start_scene = None
                scene.run_scene()
            else:
                Console_UI().inform_user(
                    f"Skip '{scene.scene_name}' - waiting for '{start_scene}'")

    except RuntimeError as error:
        Console_UI().warn_user(error)
        Console_UI().inform_user("\n\n Traceback: \n")

        traceback.print_exc()
    except KeyboardInterrupt:
        Console_UI().inform_user(
            f'\nInterrupted by ctrl+c - stopped at "{scene.scene_name}"')
    else:
        Console_UI().inform_user("Done with all scenarios!")

    Console_UI().inform_user('To view results, checkout the tensorboard:')
    Console_UI().inform_user(
        f'tensorboard --logdir /media/max/HD_1_3TB/log/{cfgs.sub_log_path}/tensorboard'
    )
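The skip-until-start_scene logic in the loop above is a drop-while pattern. An equivalent standalone sketch with itertools.dropwhile (the scene list is illustrative):

from itertools import dropwhile

scenes = ['warmup', 'train', 'finetune']  # stand-ins for Scene objects
start = 'train'

iterator = dropwhile(lambda s: s != start, scenes) if start else scenes
for scene in iterator:
    print('running', scene)  # runs 'train' and 'finetune', skips 'warmup'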
Example #3
    def __init__(
        self,
        neural_net_name,
        neural_net_cfgs,
        layers,
        optimizer_type: str = 'sgd',
        input_name: str = '',
        output_name: str = '',
        input_shape: list = None,
        output_shape: list = None,
        load_from_batch=True,
        add_noise=False,
    ):
        super().__init__()
        self.neural_net_name = neural_net_name
        self.neural_net_cfgs = neural_net_cfgs
        self.input_name = input_name
        self.output_name = output_name
        # avoid the mutable-default-argument pitfall: fall back to a fresh list
        self.input_shape = input_shape if input_shape is not None else []
        self.output_shape = output_shape if output_shape is not None else []
        self.layers = layers
        self.optimizer_type = optimizer_type
        self.add_noise = add_noise
        self.load_from_batch = load_from_batch
        self.weighted_average_parameters = None
        self.weighted_average_parameters_counter = 0
        self.batch_norm_update_counter = 0
        self.momenta = {}

        if self.load_from_batch:
            self.forward = self.forward_from_batch
        else:
            self.forward = self.forward_data

        if Global_Cfgs().get('DEVICE_BACKEND') == 'cuda':
            self.layers.cuda()
            self.layers = nn.DataParallel(self.layers)

        self.network_memory_usage = None
        try:
            self.network_memory_usage = summarizeModelSize(
                model=layers,
                input_size=tuple(self.input_shape),
                device=Global_Cfgs().get('DEVICE_BACKEND'),
            )
        except Exception as e:
            Console_UI().warn_user(
                f'Failed to get size for {neural_net_name}: {e}')

        Console_UI().debug(self.layers)
        self.optimizer = self.get_optimizer()

        self.load()
Example #4
    def __init__(self, fake_values=None):
        init_values = {
            'DEVICE_BACKEND': 'gpu',
            'IMAGE_BACKEND': 'cv2',
            'output_name': 'test_output',
            'target_name': 'test_target',
            'ignore_index': -100,
        }
        init_values.update(fake_values or {})

        if Global_Cfgs in Singleton._instances:
            cfg = Global_Cfgs()
            cfg.cfgs = init_values
        else:
            Global_Cfgs(test_mode=True, test_init_values=init_values)
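This test helper relies on a metaclass-backed singleton registry (Singleton._instances). A minimal sketch of how such a registry typically looks, assuming names that match the usage above rather than the project's actual implementation:

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # construct on first call, afterwards always return the cached instance
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Global_Cfgs(metaclass=Singleton):
    def __init__(self, test_mode=False, test_init_values=None):
        self.cfgs = dict(test_init_values or {})

    def get(self, name, default=None):
        return self.cfgs.get(name, default)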
Example #5
    def gradient_penalty(self, neural_net, real_data, fake_data):
        batch_size = min(real_data.shape[0], fake_data.shape[0])
        real_data = real_data[:batch_size, :]
        fake_data = fake_data[:batch_size, :]

        # one mixing coefficient per sample, broadcast over the remaining dims
        # (avoids the expand().view_as() round-trip, which breaks on non-contiguous tensors)
        alpha = torch.rand(batch_size, *([1] * (real_data.dim() - 1)))
        if self.use_cuda:
            alpha = alpha.cuda()

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)

        if Global_Cfgs().get('DEVICE_BACKEND') == 'cuda':
            interpolates = interpolates.cuda()

        interpolates = interpolates.detach().requires_grad_(True)  # autograd.Variable is deprecated
        disc_interpolates = neural_net(interpolates)

        gradients = autograd.grad(outputs=disc_interpolates,
                                  inputs=interpolates,
                                  grad_outputs=torch.ones_like(disc_interpolates),
                                  create_graph=True,
                                  retain_graph=True,
                                  only_inputs=True)[0]

        # standard WGAN-GP penalty: lambda * E[(||grad||_2 - 1)^2], with lambda = 10
        gradient_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() * 10
        return gradient_penalty
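This is the standard WGAN-GP penalty (Gulrajani et al., 2017). A self-contained toy run of the same computation, with a throwaway critic standing in for neural_net (all names illustrative, CPU only):

import torch
import torch.nn as nn

critic = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
real = torch.randn(4, 8)
fake = torch.randn(4, 8)

alpha = torch.rand(4, 1)  # one mixing coefficient per sample
interp = (alpha * real + (1 - alpha) * fake).detach().requires_grad_(True)
scores = critic(interp)

grads = torch.autograd.grad(outputs=scores, inputs=interp,
                            grad_outputs=torch.ones_like(scores),
                            create_graph=True)[0]
penalty = ((grads.norm(2, dim=1) - 1) ** 2).mean() * 10
print(penalty.item())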
Example #6
    def __init__(self, *args, **kwargs):
        if 'pseudo_mask_margin' in kwargs:
            self.pseudo_mask_margin = kwargs.pop('pseudo_mask_margin')
        else:
            self.pseudo_mask_margin = Global_Cfgs().get(
                'pseudo_mask_margin', 0.5)
        super().__init__(*args, **kwargs)
        # margin is half the logit of the signal-to-noise ratio (see below)
        self.margin = -np.log(1. / self.signal_to_noise_ratio - 1) / 2
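The margin line is half the logit of the signal-to-noise ratio: -log(1/p - 1) = log(p / (1 - p)) = logit(p). A quick sanity check with an assumed ratio of 0.9:

import numpy as np

p = 0.9  # assumed signal_to_noise_ratio
margin = -np.log(1. / p - 1) / 2
assert np.isclose(margin, np.log(p / (1 - p)) / 2)  # logit(p) / 2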
Example #7
    def get_cfgs(self, name, default=None):
        # TODO: the various get_cfgs methods should probably be merged into one spot
        if name in self.task_cfgs:
            return self.task_cfgs[name]
        if name in self.task_cfgs['apply']:
            return self.task_cfgs['apply'][name]
        if name in self.scene_cfgs:
            return self.scene_cfgs[name]
        if name in self.scenario_cfgs:
            return self.scenario_cfgs[name]
        return Global_Cfgs().get(name, default=default)
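Examples #7-#9, #15, #17 and #21 all implement the same pattern: a fixed-precedence lookup across several config dicts with a fall-through to the global config. A hedged sketch of the same idea with collections.ChainMap (the layer names and values are illustrative):

from collections import ChainMap

task_cfgs = {'optimizer_type': 'adam'}
scene_cfgs = {'epochs': 10}
scenario_cfgs = {'epochs': 5, 'batch_size': 32}

cfgs = ChainMap(task_cfgs, scene_cfgs, scenario_cfgs)
print(cfgs.get('epochs'))            # 10 - the more specific scene layer wins
print(cfgs.get('batch_size'))        # 32 - falls through to the scenario layer
print(cfgs.get('missing', 'dflt'))   # 'dflt'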
Example #8
    def get_cfgs(self, name, default=None):
        # unlike the other get_cfgs variants, global (user-supplied) settings win here
        user_cfgs = Global_Cfgs().get(name, default=None)
        if user_cfgs is not None:
            return user_cfgs
        if name in self.modality_cfgs:
            return self.modality_cfgs[name]
        if name in self.experiment_cfgs:
            return self.experiment_cfgs[name]
        if name in self.dataset_cfgs:
            return self.dataset_cfgs[name]
        return default
Example #9
    def get_cfgs(self, name, default=None):
        if name in self.loss_cfgs:
            return self.loss_cfgs[name]
        if name in self.graph_cfgs:
            return self.graph_cfgs[name]
        if name in self.task_cfgs['apply']:
            return self.task_cfgs['apply'][name]
        if name in self.scene_cfgs:
            return self.scene_cfgs[name]
        if name in self.scenario_cfgs:
            return self.scenario_cfgs[name]
        return Global_Cfgs().get(name, default=default)
Example #10
        def hook(module, input, output):
            class_name = str(module.__class__).split(".")[-1].split("'")[0]
            module_idx = len(summary)

            m_key = "%s-%i" % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]["input_shape"] = list(input[0].size())
            summary[m_key]["input_shape"][0] = Global_Cfgs().get('batch_size')
            if isinstance(output, (list, tuple)):
                summary[m_key]["output_shape"] = [[-1] + list(o.size())[1:] for o in output]
            else:
                summary[m_key]["output_shape"] = list(output.size())
                summary[m_key]["output_shape"][0] = Global_Cfgs().get('batch_size')

            params = 0
            if hasattr(module, "weight") and hasattr(module.weight, "size"):
                params += int(torch.prod(torch.LongTensor(list(module.weight.size()))))
                summary[m_key]["trainable"] = module.weight.requires_grad
            if hasattr(module, "bias") and hasattr(module.bias, "size"):
                params += int(torch.prod(torch.LongTensor(list(module.bias.size()))))
            summary[m_key]["nb_params"] = params  # plain int, so later "{0:,}" formatting works
Example #11
    def get_iterator(self):
        if self.dataloader_iterator is None:
            loader = DataLoader(
                dataset=self,
                batch_size=1,  # the batch size is decided when generating the bins
                shuffle=False,  # the bin generation shuffles
                num_workers=Global_Cfgs().get('num_workers'),
                collate_fn=collate_factory(
                    keys_2_ignore=list(self.get_batch_defaults([0]).keys())),
            )
            self.dataloader_iterator = iter(loader)
        return self.dataloader_iterator
Example #12
    def init_modality(self, modality_name: str, modality_cfgs: dict = None):
        modality_name = modality_name.lower()
        assert modality_cfgs is not None, \
            'modality_cfgs should not be None in %s' % modality_name

        start_time = time.time()
        Modality, content, dictionary = get_modality_and_content(
            annotations=self.annotations,
            modality_name=modality_name,
            modality_cfgs=modality_cfgs,
            ignore_index=-100,  # the -100 is defined in the loss_cfgs and not available here :-(
        )

        modality = Modality(
            dataset_name=self.dataset_name,
            dataset_cfgs=self.dataset_cfgs,
            experiment_name=self.experiment_name,
            experiment_cfgs=self.experiment_cfgs,
            modality_name=modality_name,
            modality_cfgs=modality_cfgs,
            content=content,
            dictionary=dictionary,
        )

        if modality.is_explicit_modality():
            self.explicit_modalities[modality_name] = modality
            if modality.is_input_modality():
                self.explicit_input_modalities[modality_name] = modality
            elif modality.is_output_modality():
                self.explicit_output_modalities[modality_name] = modality
            else:
                raise ValueError(
                    'Explicit modalities should either be input or output')
        elif modality.is_implicit_modality():
            self.implicit_modalities[modality_name] = modality

        # Add explicit and implicit modalities
        # TODO - Ali: why do we need this split? When would a modality be neither explicit nor implicit?
        self.modalities.update(self.explicit_modalities)
        self.modalities.update(self.implicit_modalities)

        if not Global_Cfgs().get('silent_init_info'):
            Console_UI().inform_user(
                info='Initialized %s modality in %s in %d milliseconds' %
                (modality_name, self.get_name(),
                 1000 * (time.time() - start_time)),
                debug=modality_cfgs,
            )
Example #13
    def get_content_statistics(self, labels=None):
        if labels is None:
            labels = self.labels[self.labels != self.ignore_index]

        if not self.get_cfgs('to_each_view_its_own_label'):
            # labels = [l.to_list()[0] for idx, l in labels.groupby(level=0)]
            # Better performance:
            labels = labels[labels.index.get_level_values(level=1) == 0]

        statistics = {}
        c = Counter(labels)
        statistics['labels'] = np.array([-1, 0, 1])
        statistics['label_count'] = np.array([c[-1], c[0], c[1]])
        statistics['label_likelihood'] = statistics['label_count'] / np.sum(
            statistics['label_count'])

        loss_type = Global_Cfgs().get('loss_weight_type', None)
        if loss_type == 'max':
            # Cap the weight denominator at max(1000, total/3), i.e. limit how strongly
            # a frequent class can be down-weighted once there are enough observations.
            # We've found that np.sqrt or tighter caps cause strange collapses
            # in the multi-bipolar estimates
            total_with_label = sum(
                [statistics['label_count'][i] for i in [0, 2]])
            max_for_loss = max(1000, total_with_label / 3)
            statistics['loss_weight'] = np.array(
                [1 / min(c[i] + 1, max_for_loss) for i in [-1, 1]])
        elif loss_type == 'sqrt':
            # Square root loss
            statistics['loss_weight'] = np.array(
                [1 / np.sqrt(c[i] + 1) for i in [-1, 1]])
        else:
            # Basic loss
            statistics['loss_weight'] = np.array(
                [1 / (c[i] + 1) for i in [-1, 1]])

        # Normalize the weights so that sum equals 1
        statistics['loss_weight'] /= np.sum(statistics['loss_weight'])
        statistics['num_classes'] = self.get_num_classes()

        label_informativeness = {}
        for label_index, label_likelihood in zip(
                statistics['labels'], statistics['label_likelihood']):
            label_informativeness[label_index] = (
                1 - label_likelihood) * self.signal_to_noise_ratio

        statistics['label_informativeness'] = label_informativeness

        return statistics
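To see how the three weighting schemes differ, here is a hedged toy run on made-up label counts (the -1/0/1 coding follows the example above):

from collections import Counter
import numpy as np

c = Counter({-1: 200, 0: 400, 1: 9000})  # illustrative counts

basic = np.array([1 / (c[i] + 1) for i in [-1, 1]])
sqrt_w = np.array([1 / np.sqrt(c[i] + 1) for i in [-1, 1]])
cap = max(1000, (c[-1] + c[1]) / 3)  # total_with_label / 3
max_w = np.array([1 / min(c[i] + 1, cap) for i in [-1, 1]])

for name, w in [('basic', basic), ('sqrt', sqrt_w), ('max', max_w)]:
    print(name, w / w.sum())  # normalized as in get_content_statistics

Both the 'sqrt' and 'max' variants put less extreme weight on the rare class than plain inverse-frequency weighting does.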
Example #14
    def get_cfgs(self, name, default=None):
        try:
            if name in self.graph_cfgs:
                return self.graph_cfgs[name]
            if name in self.task_cfgs:
                return self.task_cfgs[name]
            if name in self.task_cfgs['apply']:
                return self.task_cfgs['apply'][name]
            if name in self.scene_cfgs:
                return self.scene_cfgs[name]
            if name in self.scenario_cfgs:
                return self.scenario_cfgs[name]
        except TypeError as e:
            Console_UI().inform_user(self.graph_cfgs)
            Console_UI().inform_user(self.task_cfgs)
            Console_UI().inform_user(self.scene_cfgs)
            Console_UI().inform_user(self.scenario_cfgs)
            raise TypeError(f'Error during {self.get_name()}: {e}')

        return Global_Cfgs().get(name, default=default)  # pass the caller's default through
Example #15
    def get_cfgs(self, name, default=None):
        if name in self.scenario_cfgs:
            return self.scenario_cfgs[name]
        return Global_Cfgs().get(name, default)
Example #16
    def __init__(
        self,
        graph_name,
        experiment_set,
        task_cfgs,
        scene_cfgs,
        scenario_cfgs,
    ):
        self.graph_name = graph_name
        self.task_cfgs = task_cfgs
        self.scene_cfgs = scene_cfgs
        self.scenario_cfgs = scenario_cfgs
        self.experiment_set = experiment_set
        self.experiment_name = self.experiment_set.get_name()

        self.graph_cfgs = self.get_graph_cfgs(self.graph_name)
        self.classification = self.get_cfgs('classification', default=False)
        self.reconstruction = self.get_cfgs('reconstruction', default=False)
        self.identification = self.get_cfgs('identification', default=False)
        self.regression = self.get_cfgs('regression', default=False)
        self.pi_model = self.get_cfgs('pi_model', default=False)
        self.real_fake = self.get_cfgs('real_fake', default=False)
        self.optimizer_type = self.get_cfgs('optimizer_type')

        if not Global_Cfgs().get('silent_init_info'):
            UI = Console_UI()
            # one loop instead of six copy-pasted inform_user calls
            for label, getter in [
                ('explicit experiment modalities',
                 self.get_experiment_explicit_modalities),
                ('implicit experiment modalities',
                 self.get_experiment_implicit_modalities),
                ('explicit graph modalities',
                 self.get_graph_specific_explicit_modalities),
                ('implicit graph modalities',
                 self.get_graph_specific_implicit_modalities),
                ('explicit models', self.get_explicit_models),
                ('implicit models', self.get_implicit_models),
            ]:
                items = getter()
                UI.inform_user(info=[label, list(items.keys())], debug=items)
Example #17
    def get_cfgs(self, name, default=None):
        # dataset_cfgs may not be set yet when this is called during __init__
        if 'dataset_cfgs' in self.__dict__ and name in self.dataset_cfgs:
            return self.dataset_cfgs[name]
        return Global_Cfgs().get(name, default)
Example #18
    def get_forward_noise(self):
        # noise is only injected during training, and only for half of the forward passes
        if not self.training:
            return 0.0
        if np.random.rand() < 0.5:
            return 0.0
        return Global_Cfgs().forward_noise * np.random.rand()
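In expectation this returns forward_noise * 0.25 while training: half the calls return 0, and the rest average forward_noise / 2. A quick empirical check with an assumed config value:

import numpy as np

forward_noise = 0.1  # assumed config value
samples = [0.0 if np.random.rand() < 0.5 else forward_noise * np.random.rand()
           for _ in range(100000)]
print(np.mean(samples))  # ~0.025 == forward_noise * 0.25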
Example #19
def summarizeModelSize(model, input_size, device: str, print_summary=False):

    def register_hook(module):

        def hook(module, input, output):
            class_name = str(module.__class__).split(".")[-1].split("'")[0]
            module_idx = len(summary)

            m_key = "%s-%i" % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]["input_shape"] = list(input[0].size())
            summary[m_key]["input_shape"][0] = Global_Cfgs().get('batch_size')
            if isinstance(output, (list, tuple)):
                summary[m_key]["output_shape"] = [[-1] + list(o.size())[1:] for o in output]
            else:
                summary[m_key]["output_shape"] = list(output.size())
                summary[m_key]["output_shape"][0] = Global_Cfgs().get('batch_size')

            params = 0
            if hasattr(module, "weight") and hasattr(module.weight, "size"):
                params += int(torch.prod(torch.LongTensor(list(module.weight.size()))))
                summary[m_key]["trainable"] = module.weight.requires_grad
            if hasattr(module, "bias") and hasattr(module.bias, "size"):
                params += int(torch.prod(torch.LongTensor(list(module.bias.size()))))
            summary[m_key]["nb_params"] = params  # plain int, so the "{0:,}" formatting below works

        if (not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList) and not (module == model)):
            hooks.append(module.register_forward_hook(hook))

    device = device.lower()
    assert device in [
        "cuda",
        "cpu",
    ], "Input device is not valid, please specify 'cuda' or 'cpu'"

    if device == "cuda" and torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    # multiple inputs to the network
    if isinstance(input_size, tuple):
        input_size = [input_size]

    # use the configured batch size (batchnorm needs a batch size > 1)
    x = [torch.rand(Global_Cfgs().get('batch_size'), *in_size).type(dtype) for in_size in input_size]

    # create properties
    summary = OrderedDict()
    hooks = []

    # register hook
    model.apply(register_hook)

    # make a forward pass
    model(*x)

    # remove these hooks
    for h in hooks:
        h.remove()

    def altPrint(output: str):
        if print_summary:
            Console_UI().inform_user(output)

    altPrint("----------------------------------------------------------------")
    line_new = "{:>20}  {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
    altPrint(line_new)
    altPrint("================================================================")

    total_params = 0  # plain int; the nb_params entries are ints
    total_output = 0
    trainable_params = 0
    for layer in summary:
        # input_shape, output_shape, trainable, nb_params
        line_new = "{:>20}  {:>25} {:>15}".format(
            layer,
            str(summary[layer]["output_shape"]),
            "{0:,}".format(summary[layer]["nb_params"]),
        )
        total_params += summary[layer]["nb_params"]
        total_output += np.prod(summary[layer]["output_shape"])
        if "trainable" in summary[layer]:
            if summary[layer]["trainable"] is True:
                trainable_params += summary[layer]["nb_params"]
        altPrint(line_new)

    # assume 4 bytes/number (float on cuda).
    total_input_size = abs(np.prod(input_size) * Global_Cfgs().get('batch_size') * 4. / (1024**2.))
    total_output_size = abs(2. * total_output * 4. / (1024**2.))  # x2 for gradients
    total_params_size = abs(total_params * 4. / (1024**2.))
    total_size = total_params_size + total_output_size + total_input_size

    altPrint("================================================================")
    altPrint("Total params: {0:,}".format(total_params))
    altPrint("Trainable params: {0:,}".format(trainable_params))
    altPrint("Non-trainable params: {0:,}".format(total_params - trainable_params))
    altPrint("----------------------------------------------------------------")
    altPrint("Input size (MB): %0.2f" % total_input_size)
    altPrint("Forward/backward pass size (MB): %0.2f" % total_output_size)
    altPrint("Params size (MB): %0.2f" % total_params_size)
    altPrint("Estimated Total Size (MB): %0.2f" % total_size)
    altPrint("----------------------------------------------------------------")

    return {'total': total_size, 'param': total_params_size}
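The hook-registration trick above also works standalone. A minimal sketch with the project singletons (Global_Cfgs, Console_UI) swapped for plain arguments and print (all names illustrative):

import torch
import torch.nn as nn

def count_params(model, input_size, batch_size=2):
    summary, hooks = [], []

    def hook(module, inputs, output):
        # record layer type and its own (non-recursive) parameter count
        n = sum(p.numel() for p in module.parameters(recurse=False))
        summary.append((module.__class__.__name__, n))

    for m in model.modules():
        if m is not model and not isinstance(m, (nn.Sequential, nn.ModuleList)):
            hooks.append(m.register_forward_hook(hook))
    model(torch.rand(batch_size, *input_size))  # the forward pass fires the hooks
    for h in hooks:
        h.remove()
    return summary

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
print(count_params(net, input_size=(8,)))
# [('Linear', 144), ('ReLU', 0), ('Linear', 17)]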
Example #20
    def run_scene(self, start_epoch=0):
        logged_memory_usage = False
        ui = Console_UI()
        ui.overall_total_epochs = self.epochs
        ui.overall_total_repeats = self.repeat

        Global_Cfgs().set_forward_noise(
            self.get_cfgs('forward_noise', default=0))
        for r in range(self.repeat):
            ui.overall_repeat = r
            if self.stochastic_weight_averaging and r > 0:
                self.tasks[self.main_task].stochastic_weight_average()

            for e in range(self.epochs):
                ui.overall_epoch = e
                if start_epoch > e + r * self.epochs:
                    Scene.iteration_counter += self.epoch_size
                else:
                    for task in self.tasks.values():
                        task.update_learning_rate(self.get_learning_rate(e))

                    for _ in range(self.epoch_size):
                        for key, task in self.tasks.items():
                            if self.should_task_run(task_name=key, task=task):
                                task.step(
                                    iteration_counter=Scene.iteration_counter,
                                    scene_name=self.scene_name)
                        Scene.iteration_counter += 1

                        if not logged_memory_usage:
                            for key, task in self.tasks.items():
                                memory_usage = task.get_memory_usage_profile()
                                File_Manager().write_usage_profile(
                                    scene_name=self.scene_name,
                                    task=key,
                                    memory_usage=memory_usage,
                                )
                                ui.inform_user(
                                    f'\n Memory usage for {self.scene_name}::{key}\n'
                                )
                                ui.inform_user(memory_usage)
                            logged_memory_usage = True

                    for task in self.tasks.values():
                        task.save(scene_name='last')
                        # Not really helping with just emptying cache - we need to add something more
                        # removing as this may be the cause for errors
                        # torch.cuda.empty_cache()
        ui.reset_overall()

        # Note that the evaluation happens after this step and therefore averaging may hurt the performance
        if self.stochastic_weight_averaging_last:
            self.tasks[self.main_task].stochastic_weight_average()
            for task in self.tasks.values():
                task.save(scene_name='last')

        for task in self.tasks.values():
            task.validate(iteration_counter=Scene.iteration_counter,
                          scene_name=self.scene_name)
            task.test(iteration_counter=Scene.iteration_counter,
                      scene_name=self.scene_name)

        # Save all tasks before entering the next scene
        for task in self.tasks.values():
            task.save(scene_name=self.scene_name)
            for g in task.graphs.values():
                g.dropModelNetworks()
Example #21
    def get_cfgs(self, name, default=None):
        if name in self.experiment_cfgs:
            return self.experiment_cfgs[name]
        if name in self.dataset_cfgs:
            return self.dataset_cfgs[name]
        return Global_Cfgs().get(name, default)