Example No. 1
    def first_time(self, options, datasets):
        # we only want to collect the kernels once per epoch, so we fix the dataset/split names
        if self.dataset_name is None or self.split_name is None:
            self.dataset_name, self.split_name = find_default_dataset_and_split_names(
                datasets,
                default_dataset_name=self.dataset_name,
                default_split_name=self.split_name)

            # set the default parameters of the graph
            config_path = options['workflow_options']['sql_database_view_path']

            table_names = [
                self.table_name_activation, self.table_name_gradient
            ]
            for table_name in table_names:
                update_json_config(
                    config_path, {
                        table_name: {
                            'default': {
                                'X Axis': 'epoch',
                                'Y Axis': 'metric_value',
                                'Group by': 'layer',
                                'discard_axis_y': 'epoch',
                                'discard_axis_x': 'metric_value',
                                'discard_group_by': 'epoch',
                                'number_of_columns': 2,
                            }
                        }
                    })
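
All five examples on this page funnel their view settings through update_json_config, whose definition is not part of these snippets. A minimal sketch of such a helper, assuming it deep-merges a dictionary of settings into a JSON file on disk (the actual trw implementation may differ):

import json
import os

def update_json_config(config_path, config_update):
    # hypothetical sketch: load the existing JSON config (if any),
    # recursively merge the update into it, and write it back
    config = {}
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = json.load(f)

    def merge(base, update):
        for key, value in update.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                merge(base[key], value)
            else:
                base[key] = value

    merge(config, config_update)
    with open(config_path, 'w') as f:
        json.dump(config, f, indent=2)
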
Example No. 2
    def first_epoch(self, options):
        # set the default parameters of the graph
        config_path = options['workflow_options']['sql_database_view_path']
        update_json_config(config_path, {
            self.table_name: {
                'default': {
                    'with_column_title_rotation': '0',
                }
            }
        })
        self.init_done = True
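
Assuming the update_json_config sketch above, this call merges into the existing view configuration rather than overwriting it, so settings written earlier for the same table survive. An illustrative run (the file path and table name are hypothetical):

import json
import os
import tempfile

# exercise the sketch from Example No. 1 twice on the same file
path = os.path.join(tempfile.mkdtemp(), 'view_config.json')
update_json_config(path, {'my_table': {'default': {'with_column_title_rotation': '0'}}})
update_json_config(path, {'my_table': {'default': {'X Axis': 'epoch'}}})
with open(path) as f:
    print(json.load(f))
# {'my_table': {'default': {'with_column_title_rotation': '0', 'X Axis': 'epoch'}}}
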
Example No. 3
    def first_epoch(self, options):
        # set the default parameters of the graph
        config_path = options['workflow_options']['sql_database_view_path']
        update_json_config(
            config_path, {
                self.table_name: {
                    'default': {
                        'X Axis': 'epoch',
                        'Y Axis': 'value',
                        'Group by': 'metric',
                        'discard_axis_y': 'epoch',
                        'discard_axis_x': 'value',
                        'discard_group_by': 'epoch',
                    }
                }
            })
        self.init_done = True
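
Examples No. 2 and No. 3 share the same run-once pattern: first_epoch writes the view defaults a single time and then flips self.init_done so later epochs skip the write. A minimal sketch of how such a callback might be wired up (the class name and call signature are illustrative, not the actual trw API):

class ReportingCallbackSketch:
    # hypothetical callback demonstrating the run-once guard
    def __init__(self, table_name='my_table'):
        self.table_name = table_name
        self.init_done = False

    def first_epoch(self, options):
        config_path = options['workflow_options']['sql_database_view_path']
        update_json_config(config_path, {
            self.table_name: {
                'default': {'X Axis': 'epoch', 'Y Axis': 'value'}
            }
        })
        self.init_done = True

    def __call__(self, options, **kwargs):
        if not self.init_done:
            self.first_epoch(options)
        # ... export the table's data here ...
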
Example No. 4
    def __call__(self, options, history, model, losses, outputs, datasets,
                 datasets_infos, callbacks_per_batch, **kwargs):

        logger.info('started CallbackExportSamples.__call__')
        device = options['workflow_options']['device']

        if not self.reporting_config_exported:
            # export how the samples should be displayed by the reporting tool
            config_path = options['workflow_options']['sql_database_view_path']
            update_json_config(
                config_path, {
                    self.table_name: {
                        'data': {
                            'keep_last_n_rows': self.reporting_config_keep_last_n_rows,
                            'subsampling_factor': self.reporting_config_subsampling_factor,
                        },
                        'default': {
                            'Scatter X Axis': self.reporting_scatter_x,
                            'Scatter Y Axis': self.reporting_scatter_y,
                            'Color by': self.reporting_color_by,
                            'Display with': self.reporting_display_with,
                            'Binning X Axis': self.reporting_binning_x_axis,
                            'Binning selection': self.reporting_binning_selection,
                        }
                    }
                })
            self.reporting_config_exported = True

        sql_database = options['workflow_options']['sql_database']
        if self.clear_previously_exported_samples:
            cursor = sql_database.cursor()
            table_truncate(cursor, self.table_name)
            sql_database.commit()

            # also remove the binary/image store
            root = os.path.dirname(
                options['workflow_options']['sql_database_path'])
            create_or_recreate_folder(
                os.path.join(root, 'static', self.table_name))

        sql_table = reporting.TableStream(cursor=sql_database.cursor(),
                                          table_name=self.table_name,
                                          table_role='data_samples')

        logger.info(f'export started..., N={self.max_samples}')
        for dataset_name, dataset in datasets.items():
            root = os.path.join(
                options['workflow_options']['current_logging_directory'],
                'static', self.table_name)
            if not os.path.exists(root):
                utilities.create_or_recreate_folder(root)

            for split_name, split in dataset.items():
                exported_cases = []
                trainer.eval_loop(
                    device,
                    dataset_name,
                    split_name,
                    split,
                    model,
                    losses[dataset_name],
                    history=None,
                    callbacks_per_batch=callbacks_per_batch,
                    callbacks_per_batch_loss_terms=[
                        functools.partial(
                            callbacks_per_loss_term,
                            root=options['workflow_options']['current_logging_directory'],
                            datasets_infos=datasets_infos,
                            loss_terms_inclusion=self.loss_terms_inclusion,
                            feature_exclusions=self.feature_exclusions,
                            dataset_exclusions=self.dataset_exclusions,
                            split_exclusions=self.split_exclusions,
                            exported_cases=exported_cases,
                            max_samples=self.max_samples,
                            epoch=len(history),
                            sql_table=sql_table,
                            format=self.format,
                            select_fn=self.select_sample_to_export)
                    ])

        sql_database.commit()
        logger.info('successfully completed CallbackExportSamples.__call__!')
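
The table_truncate and create_or_recreate_folder helpers used above are not shown. Plausible sketches, assuming the straightforward behavior their names suggest (the real trw helpers may differ):

import os
import shutil

def table_truncate(cursor, table_name):
    # hypothetical sketch: delete all rows but keep the table schema
    # (SQLite has no TRUNCATE statement; table_name is assumed trusted)
    cursor.execute(f'DELETE FROM {table_name}')

def create_or_recreate_folder(path):
    # hypothetical sketch: wipe the folder if present, then recreate it
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)
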
Example No. 5
    def __call__(self, options, history, model, losses, outputs, datasets,
                 datasets_infos, callbacks_per_batch, **kwargs):
        logger.info('CallbackReportingModelSummary exporting model...')
        if self.split_name is None or self.dataset_name is None:
            self.dataset_name, self.split_name = find_default_dataset_and_split_names(
                datasets,
                default_dataset_name=self.dataset_name,
                default_split_name=self.split_name)

            if self.split_name is None or self.dataset_name is None:
                # no suitable dataset name
                return

        table_name = 'model_summary'
        if not self.reporting_config_exported:
            self.reporting_config_exported = True
            config_path = options['workflow_options']['sql_database_view_path']
            update_json_config(config_path, {
                table_name: {
                    'default': {
                        'with_column_title_rotation': '0',
                    }
                }
            })

        batch = next(iter(datasets[self.dataset_name][self.split_name]))
        batch['split_name'] = self.split_name
        device = options['workflow_options']['device']
        batch = utilities.transfer_batch_to_device(batch, device=device)
        summary, total_output_size, total_params_size, total_params, trainable_params = model_summary_base(
            model, batch)
        module_to_name = collect_hierarchical_module_name(
            type(model).__name__, model)

        layer_name = []
        input_shape = []
        output_shape = []
        nb_params = []
        nb_trainable_params = []
        for module, values in summary.items():
            module_name = module_to_name.get(module)
            if module_name is None:
                module_name = str(module)

            layer_name.append(module_name)
            input_shape.append(str(values['input_shape']))
            output_shape.append(str(values['output_shape']))
            nb_params.append(str(values['nb_params']))
            nb_trainable_params.append(str(values['total_trainable_params']))

        batch = collections.OrderedDict([
            ('layer name', layer_name),
            ('input_shape', input_shape),
            ('output_shape', output_shape),
            ('parameters', nb_params),
            ('trainable parameters', nb_trainable_params),
        ])

        preamble = html_list([
            f'Total parameters: {total_params / 1000000:.2f}M',
            f'Trainable parameters: {trainable_params / 1000000:.2f}M',
            f'Non-trainable parameters: {total_params - trainable_params}',
            f'Forward/backward pass size: {total_output_size:.2f} MB',
            f'Params size: {total_params_size:.2f} MB'
        ], header='Model infos')

        export_table(options,
                     table_name,
                     batch,
                     table_role='data_tabular',
                     clear_existing_data=True,
                     table_preamble=preamble)

        logger.info('CallbackReportingModelSummary exporting model done!')
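
The html_list helper that builds the preamble is not shown either; a minimal sketch, assuming it renders an optional header followed by an HTML bullet list:

def html_list(items, header=None):
    # hypothetical sketch: render a header plus an HTML unordered list
    html = f'<h3>{header}</h3>' if header is not None else ''
    html += '<ul>' + ''.join(f'<li>{item}</li>' for item in items) + '</ul>'
    return html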