Example #1
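Note: in the snippets below, cproduct is assumed to be itertools.product imported under an alias (e.g. from itertools import product as cproduct); the surrounding imports are not part of the excerpts.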
def multiscale(srcDir, dstDir, ratio, resolutions, compact, excludeRegex):
    # Derive the target pixel widths from the requested resolutions
    imgWidths = [round(ratio * res) for res in resolutions]
    for imgWidth, srcFileName in cproduct(imgWidths, os.listdir(srcDir)):
        srcFilePath = os.path.join(srcDir, srcFileName)
        if not isImageFile(srcFilePath) or isExcluded(srcFilePath,
                                                      excludeRegex):
            continue
        # Create destination path according to user compact option
        if compact:
            newFileName = appendToFileName(srcFileName, ("-" + str(imgWidth)))
            dstFilePath = os.path.join(dstDir, newFileName)
        else:
            subDirPath = os.path.join(dstDir, "{}x".format(imgWidth))
            dstFilePath = os.path.join(subDirPath, srcFileName)
            if not os.path.isdir(subDirPath):
                os.mkdir(subDirPath)
        # Load, resize and save image
        channel(srcFilePath, [
            loadImg,
            partial(scaleImg, imgWidth, imgWidth, keepRatio=True),
            partial(saveImg, dstFilePath)
        ])
    print("Generated imgage info: [[<resolution>, <scale>]...]")
    res_width = []
    for width, res in zip(imgWidths, resolutions):
        res_width.append([width, res])
    print(res_width)
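
A minimal standalone sketch of the width/file pairing above, assuming the itertools.product alias and with file names invented for illustration:

from itertools import product as cproduct

imgWidths = [320, 640]
fileNames = ['a.png', 'b.png']
for imgWidth, srcFileName in cproduct(imgWidths, fileNames):
    # every width is paired with every file; widths vary slowest
    print(imgWidth, srcFileName)
# 320 a.png / 320 b.png / 640 a.png / 640 b.png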
Example #2
    def get_utility_map(self):
        """Calculate utility for moving to every position in environment."""
        next_pos_idxs = list(
            cproduct(range(self.env.nx), range(self.env.ny),
                     range(self.env.nz)))

        self.calc_util(next_pos_idxs)

        return self.move_utils.reshape(self.env.shape)
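
The call above enumerates every (x, y, z) index in the environment grid. Assuming the itertools.product alias, it matches numpy's np.ndindex, which yields the same triples in the same C order:

import numpy as np
from itertools import product as cproduct

nx, ny, nz = 2, 3, 4
assert list(cproduct(range(nx), range(ny), range(nz))) == list(np.ndindex(nx, ny, nz))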
Example #3
    def get_utility_map(self):
        """Calculate utility for moving to every position in environment."""
        next_pos_idxs = list(cproduct(range(self.env.nx),
                                      range(self.env.ny),
                                      range(self.env.nz)))

        self.calc_util(next_pos_idxs)

        return self.move_utils.reshape(self.env.shape)
Example #4
def zip_cproduct(z, c, order, kwargs):
    zipped = zip(*[kwargs[k] for k in z])
    temp_0 = list(cproduct(*([zipped] + list([kwargs[k] for k in c]))))
    temp_1 = [ii[0] + ii[1:] for ii in temp_0]

    current_order = z + c
    temp_2 = [
        tuple([ii[current_order.index(jj)] for jj in order])
        for ii in temp_1
        ]

    return temp_2
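
A hedged usage sketch with invented inputs: the keys in z are zipped element-wise (varying together), the keys in c are crossed in via the Cartesian product, and each result tuple is reordered to match order:

from itertools import product as cproduct  # alias assumed by zip_cproduct

kwargs = {'a': [1, 2], 'b': [10, 20], 'c': ['x', 'y']}
result = zip_cproduct(z=['a', 'b'], c=['c'], order=['c', 'a', 'b'], kwargs=kwargs)
print(result)
# [('x', 1, 10), ('y', 1, 10), ('x', 2, 20), ('y', 2, 20)]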
Example #5
    def __call__(self, config_path: Path, run_id: str, device: Optional[str]):
        with config_path.open() as fp:
            config: Dict[str, Any] = json.load(fp)
            if "device" not in config:
                config["device"] = device
        iterable_config = self._build_iterator_config(config)
        operational_config = self._inject_values_from_default_config(
            iterable_config)
        for exp_num, (model, epoch, batch_size,
                      dataset, resolution, optimizer,
                      logs_dir, device, conv_method,
                      delta, data_parallel, downsampling, cache_dir) in \
                enumerate(cproduct(
                    operational_config['model'],
                    operational_config['epoch'],
                    operational_config['batch_size'],
                    operational_config['dataset'],
                    operational_config['resolution'],
                    operational_config['optimizer'],
                    operational_config['logs_dir'],
                    operational_config['device'],
                    operational_config['conv_method'],
                    operational_config['delta'],
                    operational_config['data_parallel'],
                    operational_config['downsampling'],
                    operational_config['cache_dir']
                )):
            print(
                "Running experiment", exp_num + 1, "of",
                np.prod([
                    len(operational_config[key])
                    for key in operational_config.keys() if key != 'metrics'
                ]))
            executor = TrainTestExecutor(self._mode)
            executor(exp_num=exp_num,
                     optimizer=optimizer,
                     dataset=dataset,
                     model=model,
                     metrics=operational_config['metrics'],
                     batch_size=batch_size,
                     epoch=epoch,
                     device=device,
                     logs_dir=logs_dir,
                     delta=delta,
                     data_parallel=data_parallel,
                     downsampling=downsampling,
                     resolution=resolution,
                     run_id=run_id,
                     model_module=self._model_module,
                     dataset_module=self._dataset_module,
                     optimizer_module=self._optimizer_module,
                     metric_module=self._metrics_module,
                     cache_dir=cache_dir)
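
The "of N" count printed above is the product of the option-list lengths, which is exactly how many tuples cproduct yields; a self-contained check (np.prod is used since np.product was removed in NumPy 2.0):

import numpy as np

operational_config = {'model': ['m1', 'm2'], 'epoch': [10],
                      'batch_size': [32, 64], 'metrics': ['acc']}
n_runs = np.prod([len(operational_config[key])
                  for key in operational_config if key != 'metrics'])
print(n_runs)  # 4 = 2 models * 1 epoch setting * 2 batch sizes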
Example #6
def show_distributions(EXPERIMENTS, VARIABLES, ODOR_STATES, AX_GRID):

    AX_SIZE = (6, 4)
    LW = 2
    COLORS = ('b', 'g', 'r')

    fig_size = (AX_SIZE[0] * AX_GRID[1], AX_SIZE[1] * AX_GRID[0])

    fig, axs = plt.subplots(*AX_GRID, figsize=fig_size, tight_layout=True)

    for ax, (expt_id, variable) in zip(axs.flatten(),
                                       cproduct(EXPERIMENTS, VARIABLES)):

        handles = []

        for odor_state, color in zip(ODOR_STATES, COLORS):

            tp_dstr = session.query(models.TimepointDistribution).filter_by(
                variable=variable,
                experiment_id=expt_id,
                odor_state=odor_state).first()

            handles.append(
                ax.plot(tp_dstr.bincs,
                        tp_dstr.cts,
                        lw=LW,
                        color=color,
                        label=odor_state)[0])

        ax.set_xlabel(variable)
        ax.set_ylabel('counts')

        ax.legend(handles=handles)

        ax.set_title('{}\n{}'.format(expt_id, variable))

    for ax in axs.flatten():

        set_font_size(ax, 16)

    return fig
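
Zipping the flattened axes with cproduct(EXPERIMENTS, VARIABLES) assigns one (experiment, variable) pair per subplot in row-major order; a minimal sketch of the pairing without matplotlib, with invented names:

from itertools import product as cproduct

EXPERIMENTS = ['expt_a', 'expt_b']
VARIABLES = ['speed', 'heading']
for ax_idx, (expt_id, variable) in enumerate(cproduct(EXPERIMENTS, VARIABLES)):
    print(ax_idx, expt_id, variable)  # 0..3, experiments varying slowest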
Example #7
def show_distributions(EXPERIMENTS, VARIABLES, ODOR_STATES, AX_GRID):

    AX_SIZE = (6, 4)
    LW = 2
    COLORS = ('b', 'g', 'r')

    fig_size = (AX_SIZE[0] * AX_GRID[1], AX_SIZE[1] * AX_GRID[0])

    fig, axs = plt.subplots(*AX_GRID,
                            figsize=fig_size,
                            tight_layout=True)

    for ax, (expt_id, variable) in zip(axs.flatten(), cproduct(EXPERIMENTS, VARIABLES)):

        handles = []

        for odor_state, color in zip(ODOR_STATES, COLORS):

            tp_dstr = session.query(models.TimepointDistribution).filter_by(
                variable=variable, experiment_id=expt_id,
                odor_state=odor_state).first()

            handles.append(ax.plot(
                tp_dstr.bincs, tp_dstr.cts, lw=LW, color=color, label=odor_state)[0])

        ax.set_xlabel(variable)
        ax.set_ylabel('counts')

        ax.legend(handles=handles)

        ax.set_title('{}\n{}'.format(expt_id, variable))

    for ax in axs.flatten():

        set_font_size(ax, 16)

    return fig
Example #8
def load_brain_dist_matrix(labels, in_mm=True):
    """
    Load brain distance matrix.
    :param labels: sequence of labels (i.e., ordering of rows/cols)
    :param in_mm: if True, distance matrix is returned in mm, if False in microns
    :return: distance matrix
    """
    if not os.path.exists(DIST_DATA_FILE_NAME):
        print('Downloading data to {}...'.format(DIST_DATA_FILE_NAME))
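        # NOTE: urllib.urlretrieve is the Python 2 API; on Python 3 this is
        # urllib.request.urlretrieve.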
        urllib.urlretrieve(DIST_DATA_FILE_URL, DIST_DATA_FILE_NAME)

    data = pd.read_excel(DIST_DATA_FILE_NAME)
    n_nodes = len(labels)
    dists = np.zeros((n_nodes, n_nodes), dtype=float)

    for idx_1, idx_2 in cproduct(range(n_nodes), range(n_nodes)):

        label_1 = labels[idx_1]
        label_2 = labels[idx_2]

        base_1 = label_1[:-2]
        base_2 = label_2[:-2]

        if label_1[-1] == label_2[-1]:
            ext_1 = '_ipsi'
            ext_2 = '_ipsi'
        else:
            ext_1 = '_ipsi'
            ext_2 = '_contra'

        dists[idx_1, idx_2] = data[base_1 + ext_1][base_2 + ext_2]

    if in_mm:
        dists /= 1000

    return dists
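
cproduct(range(n_nodes), range(n_nodes)) visits every (row, column) pair exactly once, flattening the usual two nested loops; a minimal sketch of the fill pattern with a toy distance:

import numpy as np
from itertools import product as cproduct

n = 3
dists = np.zeros((n, n))
for idx_1, idx_2 in cproduct(range(n), range(n)):
    dists[idx_1, idx_2] = abs(idx_1 - idx_2)  # toy 'distance'
print(dists)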
Example #10
def record_connectivity_analysis(
        SEED, GROUP, LOG_FILE,
        V_TH, G_W, G_X, RP,
        N, LS, QS, MATCH_PERCENTS, N_TRIALS, N_STIM_SEQS):
    """
    Analyze the dependence of replay probability on the percent match between
    the stimulus transition matrix and network connectivity.
    """
    # preliminaries
    session = db.connect_and_make_session('nothing_but_reruns')
    db.prepare_logging(LOG_FILE)
    np.random.seed(SEED)

    for l, q in cproduct(LS, QS):

        logging.info('Running sim. for L = {}, Q = {}'.format(l, q))
        replay_probs = np.nan * np.zeros((N_TRIALS, len(MATCH_PERCENTS)))

        for trial_ctr in range(N_TRIALS):

            logging.info('Trial {} started.'.format(trial_ctr + 1))

            # generate random stimulus transition matrix
            while True:
                trs = (np.random.rand(N, N) < q).astype(float)
                np.fill_diagonal(trs, 0)
                if np.all(trs.sum(axis=0) > 0): break

            w_stim = trs.copy()

            # normalize all columns to 1 to make it probabilistic
            for col_ctr in range(N):
                trs[:, col_ctr] /= trs[:, col_ctr].sum()

            p_0 = get_stationary_distribution(trs)

            # loop over match percentages
            w_rand = (np.random.rand(N, N) < q).astype(float)

            for mp_ctr, mp in enumerate(MATCH_PERCENTS):

                w = w_rand.copy()
                mask = np.random.rand(*w.shape) < mp
                w[mask] = w_stim[mask]
                w *= G_W

                # make network
                ntwk = network.BasicWithAthAndTwoLevelStdp(
                    th=V_TH, w=w, g_x=G_X, t_x=2 * l, rp=RP, stdp_params=None)

                correct_ctr = 0

                for _ in range(N_STIM_SEQS):

                    drives = np.zeros((2 * l + 2, N))
                    seq = sample_markov_chain(p_0, trs, l)
                    for ctr, node in enumerate(seq):

                        drives[ctr + 1, node] = 1

                    drives[l + 2, seq[0]] = 1

                    r_0 = np.zeros((N,))
                    xc_0 = np.zeros((N,))

                    rs, _ = ntwk.run(r_0, xc_0, 5*drives)

                    if np.all(rs[l+2:2*l+2, :] == drives[1:l+1, :]): correct_ctr += 1

                replay_probs[trial_ctr, mp_ctr] = correct_ctr / N_STIM_SEQS

        car = _models.ConnectivityAnalysisResult(
            group=GROUP,
            n=N, l=l, q=q,
            match_percents=MATCH_PERCENTS,
            n_trials=N_TRIALS, n_stim_seqs=N_STIM_SEQS,
            v_th=V_TH, g_w=G_W, g_x=G_X, rp=RP,
            replay_probs=replay_probs.tolist())

        session.add(car)
        session.commit()
    session.close()
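
get_stationary_distribution and sample_markov_chain are helpers not shown in this excerpt. A hypothetical sketch of the former under the usual convention (column-stochastic transition matrix, stationary vector as the eigenvector for eigenvalue 1); this is an assumption, not the repository's implementation:

import numpy as np

def get_stationary_distribution(trs):
    # trs is column-stochastic (columns sum to 1); the stationary p solves trs @ p = p
    vals, vecs = np.linalg.eig(trs)
    p = np.real(vecs[:, np.argmax(np.real(vals))])
    return p / p.sum()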
Example #11
def _GetQueryStr():
    """解析查询配置文件中关于查询条件部分,生成查询命令
  """
    _query = []
    fullquery = {}
    query = {}
    _branchs = []
    _owners = []
    _tmp = []
    if _GetOption('query.branch'):
        _tmp.append(_GetOption('query.branch'))
    if (_GetOption('addons.branchfile')
            and os.path.isfile(_GetOption('addons.branchfile'))):
        with open(_GetOption('addons.branchfile'), 'r') as fbranch:
            for i in fbranch.readlines():
                _tmp.append(i.strip())
    _branchs = sorted(set(_tmp), key=_tmp.index)
    _tmp = []
    if _GetOption('query.owner'):
        _tmp.append(_GetOption('query.owner'))
    if (_GetOption('addons.ownerfile')
            and os.path.isfile(_GetOption('addons.ownerfile'))):
        with open(_GetOption('addons.ownerfile'), 'r') as fbranch:
            for i in fbranch.readlines():
                _tmp.append(i.strip())
    _owners = sorted(set(_tmp), key=_tmp.index)
    _custom = None
    _cquery = {}
    if _GetOption('query.custom'):
        for i in _GetOption('query.custom').split(' '):
            parts = i.split(':')
            if len(parts) > 1:  # guard against entries without a ':'
                _cquery[parts[0]] = parts[1]
    if (len(_branchs) > 0 and len(_owners) > 0):
        for cp in cproduct(_branchs, _owners):
            query = {}
            if _GetOption('query.status'):
                query['status'] = _GetOption('query.status')
            query['branch'] = cp[0]
            query['owner'] = cp[1]
            fullquery = dict(query, **_cquery)
            _query.append(fullquery)
    elif len(_branchs) > 0:
        for _b in _branchs:
            query = {}
            query['branch'] = _b
            if _GetOption('query.status'):
                query['status'] = _GetOption('query.status')
            fullquery = dict(query, **_cquery)
            _query.append(fullquery)
    elif len(_owners) > 0:
        for _o in _owners:
            query = {}
            query['owner'] = _o
            if _GetOption('query.status'):
                query['status'] = _GetOption('query.status')
            fullquery = dict(query, **_cquery)
            _query.append(fullquery)
    else:
        query = {}
        if _GetOption('query.status'):
            query['status'] = _GetOption('query.status')
        fullquery = dict(query, **_cquery)
        _query.append(fullquery)
    # Build the output file name here
    res_filename = datetime.today().strftime('%Y%m%d%H%M%S')
    res_filetype = 'csv'
    if _GetOption('output.filetype'):
        res_filetype = _GetOption('output.filetype')
    try:
        if (_GetOption('output.filename')
                and _GetOption(_GetOption('output.filename'))
                and len(_GetOption(_GetOption('output.filename'))) > 0):
            res_filename = _GetOption(_GetOption('output.filename'))
        else:
            res_filename = _GetOption('output.filename')
    except Exception:
        res_filename = _GetOption('output.filename')
    finally:
        res_filename = '{}.{}'.format(res_filename, res_filetype)
    return (res_filename, _GetOption('query.limit'), _query)
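
The core of _GetQueryStr is the branch x owner expansion; a standalone hedged sketch with invented option values:

from itertools import product as cproduct

branches = ['master', 'dev']
owners = ['alice', 'bob']
status = 'open'
queries = [dict(status=status, branch=b, owner=o)
           for b, o in cproduct(branches, owners)]
# four query dicts, one per (branch, owner) combination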