Example #1
0
def _get_wc_coefficients(modelspec):
    # TODO: what about modelspecs with multiple weight_channesl?
    for m in modelspec:
        if 'weight_channels' in m['fn']:
            if 'fn_coefficients' in m.keys():
                fn = ms._lookup_fn_at(m['fn_coefficients'])
                kwargs = {**m['fn_kwargs'], **m['phi']}  # Merges dicts
                return fn(**kwargs)
            else:
                return m['phi']['coefficients']
    return None
Example #2
0
def evaluate_step(xfa, context=None):
    '''
    Helper function for evaluate. Take one step

    SVD revised 2018-03-23 so specialized xforms wrapper functions not required
      but now xfa can be len 4, where xfa[2] indicates context in keys and
      xfa[3] is context out keys

    Parameters
    ----------
    xfa : sequence of length 2 or 4
        (xform_fn_path, kwargs) or
        (xform_fn_path, kwargs, context_in_keys, context_out_keys).
    context : dict or None
        Accumulated context from prior steps. Defaults to an empty dict.
        (Was a mutable default argument ``context={}``; changed to None to
        avoid sharing one dict across calls.)

    Returns
    -------
    dict
        The incoming context merged with the xform's returned context;
        new keys override existing ones.

    Raises
    ------
    ValueError
        If xfa has the wrong length, an xf arg collides with a context
        key, the output-key count mismatches, or the xform does not
        produce a context dict.
    '''
    if context is None:
        context = {}
    if not (len(xfa) == 2 or len(xfa) == 4):
        raise ValueError('Got non 2- or 4-tuple for xform: {}'.format(xfa))
    xf = xfa[0]
    xfargs = xfa[1]
    # 4-tuple form: restrict the context passed in to the listed keys.
    if len(xfa) > 2:
        context_in = {k: context[k] for k in xfa[2]}
    else:
        context_in = context
    if len(xfa) > 3:
        context_out_keys = xfa[3]
    else:
        context_out_keys = []

    fn = ms._lookup_fn_at(xf)
    # Check for collisions; more to avoid confusion than for correctness:
    for k in xfargs:
        if k in context_in:
            m = 'xf arg {} overlaps with context: {}'.format(k, xf)
            raise ValueError(m)
    # Merge args into context, and make a deepcopy so that mutation
    # inside xforms will not be propagated unless the arg is returned.
    merged_args = {**xfargs, **context_in}
    args = copy.deepcopy(merged_args)
    # Run the xf
    log.info('Evaluating: {}'.format(xf))
    new_context = fn(**args)
    if len(context_out_keys):
        if type(new_context) is tuple:
            # Map positional outputs onto the declared output keys.
            new_context = {
                k: new_context[i]
                for i, k in enumerate(context_out_keys)
            }
        elif len(context_out_keys) == 1:
            # Single (non-tuple) return value: wrap it under the one key.
            new_context = {context_out_keys[0]: new_context}
        else:
            raise ValueError(
                'len(context_out_keys) needs to match number of outputs from xf fun'
            )
    # Use the new context for the next step
    if type(new_context) is not dict:
        raise ValueError('xf did not return a context dict: {}'.format(xf))
    context_out = {**context, **new_context}

    return context_out
Example #3
0
File: heatmap.py Project: LBHB/NEMS
def _get_fir_coefficients(modelspec, idx=0, fs=None):
    """Return FIR filter coefficients from a modelspec.

    Parametric FIR variants ('fn_coefficients', 'dexp', 'exp') return as
    soon as the first matching 'fir' module is found; plain FIR modules
    are counted and the idx'th one is returned.

    Parameters
    ----------
    idx : int
        Which plain-coefficients FIR module to select (0-based; only
        advanced by the final fall-through branch).
    fs : int or None
        Sampling rate, needed only to apply 'offsets' to plain
        coefficients; if None, offsets are skipped with a warning.

    Returns None if no matching module is found.
    """
    i = 0
    for m in modelspec:
        if 'fir' in m['fn']:
            if 'fn_coefficients' in m.keys():
                # Coefficients are generated by a parametric function.
                fn = ms._lookup_fn_at(m['fn_coefficients'])
                kwargs = {**m['fn_kwargs'], **m['phi']}  # Merges dicts
                return fn(**kwargs)

            #elif 'pole_zero' in m['fn']:
            #    c = pz_coefficients(poles=m['phi']['poles'],
            #                        zeros=m['phi']['zeros'],
            #                        delays=m['phi']['delays'],
            #                        gains=m['phi']['gains'],
            #                        n_coefs=m['fn_kwargs']['n_coefs'], fs=100)
            #    return c
            # NOTE: 'dexp' must be tested before 'exp' because the
            # substring 'exp' also matches 'dexp' module names.
            elif 'dexp' in m['fn']:
                c = fir_dexp_coefficients(phi=m['phi']['phi'],
                                          n_coefs=m['fn_kwargs']['n_coefs'])
                return c
            elif 'exp' in m['fn']:
                tau = m['phi']['tau']

                # 'a' and 'b' may be fitted (phi) or fixed (fn_kwargs).
                if 'a' in m['phi']:
                    a = m['phi']['a']
                else:
                    a = m['fn_kwargs']['a']

                if 'b' in m['phi']:
                    b = m['phi']['b']
                else:
                    b = m['fn_kwargs']['b']

                c = fir_exp_coefficients(tau,
                                         a=a,
                                         b=b,
                                         n_coefs=m['fn_kwargs']['n_coefs'])
                return c
            elif i == idx:
                # Plain FIR: coefficients stored directly in phi.
                coefficients = m['phi']['coefficients']
                if 'offsets' in m['phi']:
                    if fs is None:
                        log.warning("couldn't compute offset coefficients for "
                                    "STRF heatmap, no fs provided.")
                    else:
                        # Shift coefficients in time by the fitted offsets.
                        coefficients = _offset_coefficients(
                            coefficients, m['phi']['offsets'], fs=fs)
                return coefficients
            else:
                # Not the requested module; keep counting plain FIR modules.
                i += 1
    return None
Example #4
0
def _get_wc_coefficients(modelspec, idx=0):
    i = 0
    for m in modelspec:
        if 'weight_channels' in m['fn']:
            if 'fn_coefficients' in m.keys():
                if i == idx:
                    fn = ms._lookup_fn_at(m['fn_coefficients'])
                    kwargs = {**m['fn_kwargs'], **m['phi']}  # Merges dicts
                    return fn(**kwargs)
                else:
                    i += 1
            else:
                if i == idx:
                    return m['phi']['coefficients']
                else:
                    i += 1
    return None
Example #5
0
def enqueue_models_view():
    """Call modelfit.enqueue_models with user selections as args."""
    user = get_current_user()
    args = request.args

    # Batch strings lead with a 3-digit numeric id; drop the description.
    batch = args.get('bSelected')[:3]
    cells = args.getlist('cSelected[]')
    models = args.getlist('mSelected[]')
    code_hash = args.get('codeHash')
    exec_path = args.get('execPath')
    script_path = args.get('scriptPath')
    force_rerun = args.get('forceRerun', type=int)
    use_kamiak = args.get('useKamiak', type=int)
    kamiak_function = args.get('kamiakFunction')   # fn to generate scripts
    kamiak_path = args.get('kamiakPath')           # path to store output in
    load_kamiak = args.get('loadKamiak', type=int)  # check to load results
    kamiak_results = args.get('kamiakResults')     # path to results
    use_gpu = args.get('useGPU', type=int)
    use_exacloud = args.get('useExacloud', type=int)
    exa_ohsu = args.get('exaOHSU')
    exa_exec = args.get('exaExec')
    exa_script = args.get('exaScript')
    exa_limit = args.get('exaLimit')
    exa_exclude = args.get('exaExclude')
    exa_high_mem = args.get('exaHighMem', type=int)

    if load_kamiak:
        # Import previously computed kamiak results into the database.
        kamiak_to_database(cells, batch, models, kamiak_results,
                           exec_path, script_path)
        return jsonify(data=True)

    if use_exacloud:
        log.info('Starting exacloud jobs!')
        enqueue_exacloud_models(
            cellist=cells, batch=batch, modellist=models,
            user=user.username, linux_user=exa_ohsu,
            executable_path=exa_exec, script_path=exa_script,
            time_limit=exa_limit, useGPU=use_gpu,
            high_mem=exa_high_mem, exclude=exa_exclude)
        return jsonify(data=True)

    if use_kamiak:
        # kamiakFunction should be a stringified pointer to a function
        # that takes a list of cellids, a batch, a list of modelnames,
        # and a directory where the output should be stored,
        # Ex: kamiakScript = 'nems_lbhb.utils.my_kamiak_function'
        try:
            kamiak_script = _lookup_fn_at(kamiak_function, ignore_table=True)
            kamiak_script(cells, batch, models, kamiak_path)
            return jsonify(data=True)
        except AttributeError:
            log.warning('kamiakFunction doesnt exist or is improperly defined')
            return jsonify(data=False)

    # Default path: queue jobs on the local cluster.
    enqueue_models(cells, batch, models,
                   force_rerun=bool(force_rerun),
                   user=user.username,
                   codeHash=code_hash if code_hash else 'master',
                   executable_path=exec_path if exec_path else None,
                   script_path=script_path if script_path else None,
                   GPU_job=use_gpu)

    return jsonify(data=True)