Example #1
def add_dependent_params(parser, namespace):
    """Add params that are derived from json arguments."""

    if namespace.model_class in [
            'ae', 'vae', 'beta-tcvae', 'cond-vae', 'cond-ae', 'cond-ae-msp',
            'ps-vae', 'labels-images']:

        if namespace.model_type == 'conv':
            max_latents = 64
            parser.add_argument('--max_latents', default=max_latents)
            arch_dicts = load_handcrafted_arches(
                [namespace.n_input_channels, namespace.y_pixels, namespace.x_pixels],
                namespace.n_latents,
                namespace.ae_arch_json,
                check_memory=False,
                batch_size=namespace.approx_batch_size,
                mem_limit_gb=namespace.mem_limit_gb)
            parser.opt_list('--architecture_params', options=arch_dicts, tunable=True)

        elif namespace.model_type == 'linear':
            parser.add_argument('--n_ae_latents', default=namespace.n_latents, type=int)

        else:
            raise ValueError('%s is not a valid model type' % namespace.model_type)

        # for i, arch_dict in enumerate(arch_dicts):
        #     if (arch_dict['ae_encoding_n_channels'][-1]
        #             * arch_dict['ae_encoding_x_dim'][-1]
        #             * arch_dict['ae_encoding_y_dim'][-1]) < namespace.n_latents[i]:
        #         raise ValueError('Bottleneck smaller than number of latents')

    else:
        if getattr(namespace, 'n_latents', False):
            parser.add_argument('--n_ae_latents', default=namespace.n_latents, type=int)

    if 'neural' in namespace.model_class:

        # parse "subsample_idxs_names" arg to determine which index keys to fit; the code below
        # currently supports 'all' (all idx keys) or a single string (single idx key)
        if namespace.subsample_method != 'none':
            if namespace.subsample_idxs_dataset == 'all':
                from behavenet.data.utils import get_region_list
                idx_list = get_region_list(namespace)
                parser.opt_list(
                    '--subsample_idxs_name', options=idx_list, tunable=True)
            elif isinstance(namespace.subsample_idxs_dataset, str):
                parser.add_argument(
                    '--subsample_idxs_name', default=namespace.subsample_idxs_dataset)
            else:
                raise ValueError(
                    '%s is an invalid data type for the "subsample_idxs_dataset" key '
                    'in the data json; must be a string ("all" or a single idx key)'
                    % type(namespace.subsample_idxs_dataset))
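
The parser above is test_tube-style: opt_list is a method of test_tube's
HyperOptArgumentParser, and load_handcrafted_arches is assumed to be importable
from behavenet's architecture utilities. Below is a minimal, hypothetical sketch
of how the function might be invoked; the attribute values mirror the json keys
the 'linear' branch reads and are not taken from a real config.

from argparse import Namespace

from test_tube import HyperOptArgumentParser

parser = HyperOptArgumentParser(strategy='grid_search')
# only the attributes read by the 'linear' branch are set here; the 'conv'
# branch additionally needs n_input_channels, y_pixels, x_pixels, ae_arch_json,
# approx_batch_size, and mem_limit_gb
namespace = Namespace(model_class='ae', model_type='linear', n_latents=12)
add_dependent_params(parser, namespace)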
Example #2
import h5py
import numpy as np
import pytest

from behavenet.data import utils


def test_get_region_list(tmpdir):

    # make tmp hdf5 file
    path = tmpdir.join('data.hdf5')
    idx_data = {
        'i0': np.array([0, 1, 2]),
        'i1': np.array([3, 4, 5]),
        'i2': np.array([6, 7, 8])
    }
    with h5py.File(path, 'w') as f:
        group0 = f.create_group('group0')
        # 'groupa' exists but contains no 'group1'; the "first group contains
        # no second group" test case below relies on this group being present
        f.create_group('groupa')
        group1 = group0.create_group('group1')
        group1.create_dataset('i0', data=idx_data['i0'])
        group1.create_dataset('i1', data=idx_data['i1'])
        group1.create_dataset('i2', data=idx_data['i2'])

    # correct indices are returned
    hparams = {
        'data_dir': tmpdir,
        'lab': '',
        'expt': '',
        'animal': '',
        'session': '',
        'subsample_idxs_group_0': 'group0',
        'subsample_idxs_group_1': 'group1'
    }
    idx_return = utils.get_region_list(hparams)
    for key in idx_data.keys():
        assert np.all(idx_data[key] == idx_return[key])

    # raise exception when first group is invalid
    hparams['subsample_idxs_group_0'] = 'group2'
    hparams['subsample_idxs_group_1'] = 'group1'
    with pytest.raises(ValueError):
        utils.get_region_list(hparams)

    # raise exception when first group contains no second group
    hparams['subsample_idxs_group_0'] = 'groupa'
    hparams['subsample_idxs_group_1'] = 'group1'
    with pytest.raises(ValueError):
        utils.get_region_list(hparams)

    # raise exception when second group is invalid
    hparams['subsample_idxs_group_0'] = 'group0'
    hparams['subsample_idxs_group_1'] = 'group2'
    with pytest.raises(ValueError):
        utils.get_region_list(hparams)
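
For reference, here is a hypothetical sketch of the behavior the test expects
from get_region_list, inferred only from the assertions above; it is not the
actual behavenet.data.utils implementation, and the hdf5 path layout is an
assumption consistent with the test's empty lab/expt/animal/session keys.

import os


def get_region_list_sketch(hparams):
    # build data_dir/lab/expt/animal/session/data.hdf5; empty path components
    # are collapsed by os.path.join, matching the empty-string keys above
    path = os.path.join(
        str(hparams['data_dir']), hparams['lab'], hparams['expt'],
        hparams['animal'], hparams['session'], 'data.hdf5')
    with h5py.File(path, 'r') as f:
        group0 = hparams['subsample_idxs_group_0']
        if group0 not in f:
            raise ValueError('"%s" is not a group in %s' % (group0, path))
        group1 = hparams['subsample_idxs_group_1']
        if group1 not in f[group0]:
            raise ValueError('"%s" is not a group in "%s"' % (group1, group0))
        # map each idx key to its values as a numpy array
        return {key: np.array(dset) for key, dset in f[group0][group1].items()}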
Example #3
import os
import pickle

import pandas as pd

# helper functions used below (get_session_dir, get_expt_dir, get_subdirs,
# get_region_list, _get_dataset_str) are assumed to come from behavenet's
# fitting/data utility modules


def get_r2s_by_trial(hparams, model_types):
    """For a given session, load R^2 metrics from all decoders defined by hparams.

    Parameters
    ----------

    hparams : :obj:`dict`
        needs to contain enough information to specify decoders
    model_types : :obj:`list` of :obj:`str`
        'mlp' | 'mlp-mv' | 'lstm'

    Returns
    -------
    :obj:`pd.DataFrame`
        pandas dataframe of decoder validation metrics

    """

    dataset = _get_dataset_str(hparams)
    region_names = get_region_list(hparams)

    metrics = []
    model_idx = 0
    model_counter = 0
    for region in region_names:
        hparams['region'] = region
        for model_type in model_types:

            hparams['session_dir'], _ = get_session_dir(
                hparams, session_source=hparams.get('all_source', 'save'))
            expt_dir = get_expt_dir(hparams,
                                    model_type=model_type,
                                    model_class=hparams['model_class'],
                                    expt_name=hparams['experiment_name'])

            # gather all versions
            try:
                versions = get_subdirs(expt_dir)
            except Exception:
                print('No models in %s; skipping' % expt_dir)
                continue  # avoid referencing an undefined `versions` below

            # load csv files with model metrics (saved out from test tube)
            for version in versions:
                # read metrics csv file
                model_dir = os.path.join(expt_dir, version)
                try:
                    metric = pd.read_csv(os.path.join(model_dir,
                                                      'metrics.csv'))
                    model_counter += 1
                except FileNotFoundError:
                    continue
                # load model hyperparameters into a separate dict so the
                # `hparams` argument driving the loop is not overwritten
                with open(os.path.join(model_dir, 'meta_tags.pkl'), 'rb') as f:
                    meta_tags = pickle.load(f)
                # append model info to metrics
                version_num = int(version[8:])  # strip the 'version_' prefix
                metric['version'] = 'version_%i' % (model_idx + version_num)
                metric['region'] = region
                metric['dataset'] = dataset
                metric['model_type'] = model_type
                for key, val in meta_tags.items():
                    if isinstance(val, (str, int, float)):
                        metric[key] = val
                metrics.append(metric)

            model_idx += 10000  # assumes no more than 10k model versions/expt
    # put everything in pandas dataframe
    metrics_df = pd.concat(metrics, sort=False)
    return metrics_df
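
A short usage sketch follows; every hparams key and value below is hypothetical
except those the function and get_region_list demonstrably read, and the 'r2'
metric column name is an assumption (the actual columns depend on what the
decoders write to metrics.csv).

hparams = {
    'data_dir': '/path/to/data',
    'save_dir': '/path/to/results',
    'lab': 'lab0', 'expt': 'expt0', 'animal': 'animal0', 'session': 'session0',
    'model_class': 'neural-ae',           # hypothetical decoder class
    'experiment_name': 'grid_search',
    'subsample_idxs_group_0': 'regions',  # hdf5 groups read by get_region_list
    'subsample_idxs_group_1': 'idxs',
}
metrics_df = get_r2s_by_trial(hparams, model_types=['mlp', 'lstm'])
# e.g. best validation R^2 per region/model combination (column name assumed)
print(metrics_df.groupby(['region', 'model_type'])['r2'].max())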