Example #1
    def dump_results_dict(cls, results_dict, log_dir, mode='train'):
        """ Dump results dictionary in json file corresponding to the
        train or test results in log_dir.
        """
        assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
        if mode == 'train':
            results_json_filename = cls.train_results_json_filename
        else:
            results_json_filename = cls.test_results_json_filename

        results_json = os.path.join(log_dir, results_json_filename)
        json_dump(results_dict, results_json)
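
The snippets on this page rely on json_load and json_dump helpers that are not shown. A minimal sketch of what such wrappers around the standard json module might look like (an assumption for readability, not the project's actual implementation):

import json

def json_load(json_filename):
    # assumed helper: read a dictionary from a json file
    with open(json_filename, 'r') as f:
        return json.load(f)

def json_dump(results_dict, json_filename):
    # assumed helper: write a dictionary to a json file with readable indentation
    with open(json_filename, 'w') as f:
        json.dump(results_dict, f, indent=4)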
Example #2
    def log_additional_info(self):
        """ Log additional information to self.results_json
        """
        if not self.params['additional_info']: # empty list
            return

        results_dict = json_load(self.results_json)

        if 'sparsity' in self.params['additional_info']:
            results_dict[self.params['model_name']]['sparsity'] = \
                                        '{:d}'.format(self.net.compute_sparsity())

        if 'lipschitz_bound' in self.params['additional_info']:
            results_dict[self.params['model_name']]['lipschitz_bound'] = \
                                        '{:.3f}'.format(self.net.lipschitz_bound())

        json_dump(results_dict, self.results_json)
Example #3
    def init_json(self):
        """ Init json file for train/test results.
        """
        # initialize/verify json log file
        self.results_json = os.path.join(self.params['log_dir'],
                                        self.results_json_filename)

        if not os.path.isfile(self.results_json):
            results_dict = {}
        else:
            results_dict = json_load(self.results_json)

        if self.params['model_name'] not in results_dict:
            results_dict[self.params['model_name']] = {} # initialize model log

        # add minimal information for sorting models in results_json file
        if self.sorting_key not in results_dict[self.params['model_name']]:
            results_dict[self.params['model_name']][self.sorting_key] = 0.

        json_dump(results_dict, self.results_json)

        comb_list = list(itertools.product(['latest', 'best'],
                                           ['train', 'valid'],
                                           ['acc', 'loss']))
        self.info_list = ['_'.join(k) for k in comb_list] + ['test_acc', 'test_loss']
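
For reference, the itertools.product call above yields the eight latest/best train/valid acc/loss combinations, so after joining with underscores and appending the two test entries, self.info_list contains:

['latest_train_acc', 'latest_train_loss', 'latest_valid_acc', 'latest_valid_loss',
 'best_train_acc', 'best_train_loss', 'best_valid_acc', 'best_valid_loss',
 'test_acc', 'test_loss']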
Example #4
    def update_json(self, info, value):
        """ Update json file with latest/best validation/test accuracy/loss, if training,
        and with test accuracy otherwise

        Args:
            info: e.g. 'latest_validation_loss'
        """
        assert info in self.info_list, f'{info} should be in {self.info_list}...'

        # save in json
        results_dict = json_load(self.results_json)

        if isinstance(value, dict):
            if info not in results_dict[self.params["model_name"]]:
                results_dict[self.params["model_name"]][info] = {}
            for key, val in value.items():
                results_dict[self.params["model_name"]][info][key] = float('{:.3f}'.format(val))
        else:
            results_dict[self.params["model_name"]][info] = float('{:.3f}'.format(value))

        sorted_acc = sorted(results_dict.items(), key=lambda kv : kv[1][self.sorting_key], reverse=True)
        sorted_results_dict = collections.OrderedDict(sorted_acc)

        json_dump(sorted_results_dict, self.results_json)
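
A hypothetical call, assuming an instance named trainer and invented values for illustration:

# hypothetical usage: 'trainer' and the values are invented for illustration
trainer.update_json('latest_valid_acc', 0.912)
# dict values are also accepted and logged key by key, e.g. per-class accuracies
trainer.update_json('test_acc', {'class_0': 0.95, 'class_1': 0.87})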
Example #5
    for mode in ['train', 'test']:
        if mode == 'train':
            base_json_filename = Project.train_results_json_filename.split('.')[0]
            sorting_key = Project.train_sorting_key
        else:
            base_json_filename = Project.test_results_json_filename.split('.')[0]
            sorting_key = Project.test_sorting_key

        dictio = {}
        for log_dir in [args.log_dir1, args.log_dir2]:
            base_json_path = os.path.join(log_dir, base_json_filename)
            json_path = None
            if os.path.isfile(base_json_path + '_merged.json'):
                json_path = base_json_path + '_merged.json'
            elif os.path.isfile(base_json_path + '.json'):
                json_path = base_json_path + '.json'
            else:
                raise ValueError(f'Did not find file {base_json_path}[.json][_merged.json] ...')

            print(f'Found file {json_path}')
            # prefer the previously merged json file if present, otherwise use the plain json file
            results_dict = json_load(json_path)
            dictio = {**dictio, **results_dict}

        assert len(dictio) > 0
        sorted_results = sorted(dictio.items(), key=lambda kv : kv[1][sorting_key], reverse=True)
        sorted_results_dict = collections.OrderedDict(sorted_results)

        merged_json_path = os.path.join(args.log_dir2, base_json_filename + '_merged.json')
        json_dump(sorted_results_dict, merged_json_path)
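
Note that later entries win in the {**dictio, **results_dict} unpacking above: a model that appears in both log directories keeps the results loaded from args.log_dir2. A small illustration with invented model names:

d1 = {'model_a': {'test_acc': 90.0}}
d2 = {'model_a': {'test_acc': 92.5}}
merged = {**d1, **d2}  # -> {'model_a': {'test_acc': 92.5}}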
    parser.add_argument('--key',
                        type=str,
                        help='Key to sort json file by.')
    order_choices = {'ascending', 'descending'}
    parser.add_argument('--order',
                        metavar='STR',
                        choices=order_choices,
                        default='descending',
                        help=f'Sorting order. Choices: {str(order_choices)}')
    args = parser.parse_args()

    results_dict = json_load(args.json_file_path)
    first = next(iter(results_dict))
    assert not isinstance(results_dict[first][args.key], dict), \
                            'model_run[key] should be int or float.'

    sorted_results = sorted(results_dict.items(),
                            key=lambda kv: kv[1][args.key],
                            reverse=(args.order == 'descending'))
    sorted_results_dict = collections.OrderedDict(sorted_results)

    path_split = args.json_file_path.split('/')
    log_dir, json_file = '/'.join(path_split[:-1]), path_split[-1]
    sorted_results_json = os.path.join(log_dir,
                                       f'{args.key}_sorted_' + json_file)

    json_dump(sorted_results_dict, sorted_results_json)

    print(
        f'=> Results sorted by {args.key} successfully written to {sorted_results_json}.'
    )
    path_split = args.json_file_path.split('/')
    log_dir, json_file = '/'.join(path_split[:-1]), path_split[-1]
    avg_results_json = os.path.join(log_dir, '_'.join(['avg', json_file]))

    results_dict = json_load(args.json_file_path)
    num_runs = len(results_dict)
    avg_results_dict = {}

    first_run = next(iter(results_dict))
    # results dictionaries have, at most, depth = 2
    for key, val in results_dict[first_run].items():
        # (key, val) pairs from first run results
        if isinstance(val, dict):
            avg_results_dict[key] = {}
            for sub_key, sub_val in val.items():
                avg_val = 0.
                for run_dict in results_dict.values():
                    avg_val += float(run_dict[key][sub_key])
                avg_results_dict[key][sub_key] = float('{:.3f}'.format(
                    avg_val / num_runs))
        else:
            avg_val = 0.
            for run_dict in results_dict.values():
                avg_val += float(run_dict[key])
            avg_results_dict[key] = float('{:.3f}'.format(avg_val / num_runs))

    json_dump(avg_results_dict, avg_results_json)

    print(f'=> Average results successfully written to {avg_results_json}.')
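
For illustration, assuming a results file with two runs (run names and values invented), the averaging loop above turns

{'run_1': {'test_acc': '91.2', 'sparsity': {'layer1': '10', 'layer2': '20'}},
 'run_2': {'test_acc': '92.0', 'sparsity': {'layer1': '14', 'layer2': '18'}}}

into

{'test_acc': 91.6, 'sparsity': {'layer1': 12.0, 'layer2': 19.0}}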
    def update_index_json(self, idx):
        """ Update current run/search idx.
        """
        json_dump({'last_idx': idx, 'end_idx': self.end_idx}, self.index_json)