예제 #1
0
    def get_index_range(self, num_iter):
        """ Determine the [start, end) index range for the runs/search loop.

        Resuming takes precedence: the last run index and the end index are
        read back from the index json file.  Otherwise, user-supplied
        start_idx / end_idx override the defaults, but only when they are
        valid (i.e. smaller than num_iter).  Default range is [0, num_iter).
        """
        start_idx, end_idx = 0, num_iter

        if self.args.resume is True:
            # Pick up where the previous (interrupted) run left off.
            saved = json_load(self.index_json)
            start_idx, end_idx = saved['last_idx'], saved['end_idx']
            print(f'==> Run idx {start_idx}-{end_idx}')
        elif self.args.start_idx is not None:
            if self.args.start_idx < num_iter:
                start_idx = self.args.start_idx
            if self.args.end_idx is not None and self.args.end_idx < num_iter:
                end_idx = self.args.end_idx
            print(f'==> Run idx {start_idx}-{end_idx}')

        # remember where the loop is supposed to stop
        self.end_idx = end_idx

        return start_idx, end_idx
예제 #2
0
    def load_results_dict(cls, log_dir, mode='train'):
        """ Read the train or test results dictionary stored as json in log_dir.

        Args:
            log_dir: directory containing the results json file.
            mode: 'train' or 'test'; selects which results file to read.

        Returns:
            The results dictionary loaded from the json file.
        """
        assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
        filename = (cls.train_results_json_filename if mode == 'train'
                    else cls.test_results_json_filename)

        return json_load(os.path.join(log_dir, filename))
예제 #3
0
    def log_additional_info(self):
        """ Append optional metrics (sparsity, lipschitz bound) to the
        model's entry in self.results_json, as selected by
        self.params['additional_info'].
        """
        extras = self.params['additional_info']
        if not extras:  # empty list: nothing requested
            return

        results_dict = json_load(self.results_json)
        # alias the current model's sub-dictionary; mutations propagate
        model_entry = results_dict[self.params['model_name']]

        if 'sparsity' in extras:
            model_entry['sparsity'] = '{:d}'.format(self.net.compute_sparsity())
        if 'lipschitz_bound' in extras:
            model_entry['lipschitz_bound'] = '{:.3f}'.format(self.net.lipschitz_bound())

        json_dump(results_dict, self.results_json)
예제 #4
0
    def init_json(self):
        """ Initialize the json log file for train/test results.

        Ensures self.results_json exists, holds an entry for the current
        model, and that this entry contains the sorting key (initialized
        to 0.) used to order models in the file.  Also builds
        self.info_list, the metric names accepted by update_json().
        """
        self.results_json = os.path.join(self.params['log_dir'],
                                         self.results_json_filename)

        # start from the existing log when present, otherwise from scratch
        results_dict = (json_load(self.results_json)
                        if os.path.isfile(self.results_json) else {})

        # initialize the model log if this model was never seen before
        model_log = results_dict.setdefault(self.params['model_name'], {})
        # minimal information needed for sorting models in the results file
        model_log.setdefault(self.sorting_key, 0.)

        json_dump(results_dict, self.results_json)

        combos = itertools.product(['latest', 'best'], ['train', 'valid'], ['acc', 'loss'])
        self.info_list = ['_'.join(combo) for combo in combos] + ['test_acc', 'test_loss']
예제 #5
0
    def update_json(self, info, value):
        """ Update the results json file with a new metric value.

        Updates the latest/best validation/train accuracy/loss if training,
        and the test accuracy/loss otherwise; then rewrites the file with
        the models sorted by self.sorting_key in descending order.

        Args:
            info: metric name, e.g. 'latest_valid_loss';
                must be one of self.info_list.
            value: scalar or dict of scalars; values are rounded to
                3 decimal places before being stored.
        """
        assert info in self.info_list, f'{info} should be in {self.info_list}...'

        # save in json
        results_dict = json_load(self.results_json)
        model_dict = results_dict[self.params["model_name"]]

        if isinstance(value, dict):
            # Bug fix: the original tested `info not in self.params["model_name"]`,
            # i.e. substring membership in the model-name *string*; when `info`
            # happened to be a substring, the sub-dict was never created and the
            # assignment below raised KeyError. Check the model's log entry instead.
            if info not in model_dict:
                model_dict[info] = {}
            for key, val in value.items():
                model_dict[info][key] = float('{:.3f}'.format(val))
        else:
            model_dict[info] = float('{:.3f}'.format(value))

        # keep the file sorted so the best-performing model is listed first
        sorted_acc = sorted(results_dict.items(),
                            key=lambda kv: kv[1][self.sorting_key],
                            reverse=True)
        sorted_results_dict = collections.OrderedDict(sorted_acc)

        json_dump(sorted_results_dict, self.results_json)
if __name__ == "__main__":

    # parse arguments.
    # Bug fix: the ArgumentParser call was mis-parenthesized — a stray `),`
    # closed the call after `description`, leaving `formatter_class=...)`
    # dangling and making the whole script a SyntaxError.
    parser = argparse.ArgumentParser(
        description='Plot sparsity, error rate and lipschitz '
                    'vs TV(2) regularization weight.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--sparsified_log_dir', type=str, help='')
    parser.add_argument('--savefig', action='store_true', help='')
    parser.add_argument('--output', metavar='output folder', type=str, help='')
    args = parser.parse_args()

    if args.sparsified_log_dir is None:
        raise ValueError('Need to provide sparsified_log_dir')

    results_json = os.path.join(args.sparsified_log_dir, 'avg_results.json')
    results_dict = json_load(results_json)

    # one data point per model in the results file
    models = results_dict.keys()
    lmbdas = np.zeros(len(models))
    error_rates = np.zeros(len(models))
    sparsities = np.zeros(len(models))
    lipschitz_bounds = np.zeros(len(models))

    for i, model in enumerate(models):
        # model names encode the regularization weight as '..._lmbda_<value>_...'
        model_name_split = model.split('_')
        lmbda_idx = model_name_split.index('lmbda') + 1
        lmbda = float(model_name_split[lmbda_idx])

        lmbdas[i] = lmbda
        # NOTE(review): eval() on file contents is unsafe for untrusted input;
        # ast.literal_eval would be the safe drop-in. Kept to preserve behavior.
        error_rates[i] = 100. - eval(results_dict[model]['valid_acc']['median'])[0]
        sparsities[i] = eval(results_dict[model]['sparsity']['median'])[0]
예제 #7
0
    for mode in ['train', 'test']:
        # pick the results filename stem and sorting key matching this mode
        if mode == 'train':
            base_json_filename = Project.train_results_json_filename.split('.')[0]
            sorting_key = Project.train_sorting_key
        else:
            base_json_filename = Project.test_results_json_filename.split('.')[0]
            sorting_key = Project.test_sorting_key

        merged = {}
        for log_dir in [args.log_dir1, args.log_dir2]:
            base_json_path = os.path.join(log_dir, base_json_filename)
            # prefer an already-merged file over the plain results file
            candidates = [base_json_path + '_merged.json', base_json_path + '.json']
            json_path = next((path for path in candidates if os.path.isfile(path)), None)
            if json_path is None:
                raise ValueError(f'Did not find file {base_json_path}[.json][_merged.json] ...')

            print(f'Found file {json_path}')
            # later directories override duplicate model entries
            merged.update(json_load(json_path))

        assert len(merged) > 0
        # order models by the sorting key, best first
        ranked = sorted(merged.items(), key=lambda kv: kv[1][sorting_key], reverse=True)
        sorted_results_dict = collections.OrderedDict(ranked)

        merged_json_path = os.path.join(args.log_dir2, base_json_filename + '_merged.json')
        json_dump(sorted_results_dict, merged_json_path)
        metavar='json_file_path[STR]',
        type=str,
        help='path to json file with the train/test runs results.')
    parser.add_argument('key',
                        metavar='key[STR]',
                        type=str,
                        help='Key to sort json file by.')
    order_choices = {'ascending', 'descending'}
    parser.add_argument('--order',
                        metavar='STR',
                        choices=order_choices,
                        default='descending',
                        help=f'Sorting order. Choices: {str(order_choices)}')
    args = parser.parse_args()

    results_dict = json_load(args.json_file_path)
    # the sort key must map to a scalar (not a dict) in every run entry;
    # only the first entry is checked
    first = next(iter(results_dict))
    assert not isinstance(results_dict[first][args.key], dict), \
                            'model_run[key] should be int or float.'

    descending = (args.order == 'descending')
    ranked = sorted(results_dict.items(),
                    key=lambda kv: kv[1][args.key],
                    reverse=descending)
    sorted_results_dict = collections.OrderedDict(ranked)

    # write '<key>_sorted_<original name>' next to the input file
    *dir_parts, json_file = args.json_file_path.split('/')
    log_dir = '/'.join(dir_parts)
    sorted_results_json = os.path.join(log_dir,
                                       f'{args.key}_sorted_' + json_file)

    json_dump(sorted_results_dict, sorted_results_json)