def run():
    """Run the Trott & Born (2015) plaid/surround experiment and fit the model."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    csize = 9
    npoints = 32
    scale = 2.0
    cval = 0.5
    # one list of digitized-data CSVs per condition ('PS', 'PO')
    csvfiles = [
        [defaults._DATADIR + '/TB2015_%i_%s.csv' % (i, s)
         for i in range(-90, 90, 30)]
        for s in ('PS', 'PO')
    ]

    # experiment parameters shared by every population call
    ppop = {
        'kind': 'circular',
        'npoints': npoints,
        'scale': scale,
        'fdomain': (0, 1),
    }

    # surround angles mapped from degrees into the (0, 1) feature domain
    vals_ang = sp.array([-90., -60., -30., 0., 30., 60.])
    vals = (vals_ang + 90.) / 180.

    # fixed center-only stimulus and its population response
    center_im = stim.get_center_surround(size=size,
                                         csize=csize,
                                         cval=cval,
                                         sval=sp.nan)
    x1 = model_utils.get_population(center_im, **ppop)

    # row 0: plaid only; row 1: plaid plus surround, for each tested value
    x = sp.zeros((2, len(vals), npoints, size, size))
    for idx, val in enumerate(vals):
        probe_im = stim.get_center_surround(size=size,
                                            csize=csize,
                                            cval=val,
                                            sval=sp.nan)
        surround_im = stim.get_center_surround(size=size,
                                               csize=csize,
                                               cval=sp.nan,
                                               sval=val)
        x2 = model_utils.get_population(probe_im, **ppop)
        xs = model_utils.get_population(surround_im, **ppop)
        plaid = (x1 + x2) / 2.
        x[0, idx] = plaid
        x[1, idx] = plaid + xs
    x.shape = (2 * len(vals), npoints, size, size)

    # trott and born 2015 data
    gt = get_gt(npoints, csvfiles)

    extra_vars = {
        'scale': scale,
        'npoints': npoints,
        'cval': cval,
        'size': size,
        'csize': csize,
        'vals': vals,
        'figure_name': 'tbp',
        'return_var': 'O',
    }

    optimize_model(x, gt, extra_vars, defaults)
# Example #2
# 0
def run(hps=None):
    """Run the Busse & Wade cross-contrast masking experiment and fit the model."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    csize = 9
    npoints = 37
    scale = 1.
    _DEFAULT_BWC_CSV_CTS = sp.array([0.0, .06, .12, .25, .50]) * 100
    # grid of digitized-data CSVs, one per (test, mask) contrast pair
    csvfiles = sp.array(
        [[os.path.join(defaults._DATADIR, 'BWC2009_%i_%i.csv' % (i, j))
          for i in _DEFAULT_BWC_CSV_CTS]
         for j in _DEFAULT_BWC_CSV_CTS]).T

    # experiment parameters: orthogonal center gratings, no surround
    im = sp.array([
        stim.get_center_surround(size=size, csize=csize, cval=.25, sval=sp.nan),
        stim.get_center_surround(size=size, csize=csize, cval=.75, sval=sp.nan),
    ])

    # populations for vertical (masking) and horizontal (driving) stimuli
    #####################################################################
    xv = model_utils.get_population(im[0], kind='circular',
                                    npoints=npoints, scale=scale)
    xh = model_utils.get_population(im[1], kind='circular',
                                    npoints=npoints, scale=scale)

    # superimposed populations: every (mask, drive) contrast combination
    ##########################
    v_contrasts = [0.0, .06, .12, .25, .50]
    h_contrasts = [0.0, .06, .12, .25, .50]
    nv, nh = len(v_contrasts), len(h_contrasts)
    x = sp.array([[h * xh + v * xv for h in h_contrasts] for v in v_contrasts])
    x.shape = (nv * nh,) + x.shape[2:]

    # busse and wade data: 13 digitized samples per contrast pair
    t_paper = sp.zeros((nv, nh, 13))
    y_paper = sp.zeros((nv, nh, 13))
    for row in range(nv):
        for col in range(nh):
            t_paper[row, col], y_paper[row, col] = \
                sp.genfromtxt(csvfiles[row, col], delimiter=',').T

    # resample each digitized curve onto the model's npoints grid
    res_y_paper = sp.zeros((nv, nh, npoints))
    for r in range(nv):
        for c in range(nh):
            res_y_paper[r, c, :] = sp.signal.resample(y_paper[r, c, :], npoints)
    gt = [t_paper, res_y_paper]

    extra_vars = {
        'scale': scale,
        'npoints': npoints,
        'size': size,
        'csize': csize,
        'nv': nv,
        'nh': nh,
        'figure_name': 'bw',
        'return_var': 'O',
    }

    optimize_model(x, gt, extra_vars, defaults)
def run():
    """Run the O'Toole & Wenderoth (1977) tilt-effect experiment (figure 3a)."""
    defaults = PaperDefaults()

    # David's globals
    _DEFAULT_TILTEFFECT_DEGPERPIX = .25  # <OToole77>
    _DEFAULT_TILTEFFECT_SIZE = 51  #101
    _DEFAULT_TILTEFFECT_CSIZE = iround(2. / _DEFAULT_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_SSIZE = iround(8. / _DEFAULT_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_CVAL = .5
    _DEFAULT_TILTEFFECT_SVALS = np.linspace(0.0, 0.5, 10)
    _DEFAULT_TILTEFFECT_SCALES = {'ow77': 0.40, 'ms79': 0.60}  #0.45
    _DEFAULT_TILTEFFECT_NPOINTS = 25  #100
    _DEFAULT_TILTEFFECT_DECODER_TYPE = 'circular_vote'
    _DEFAULT_TILTEFFECT_CSV = {
        'ow77': os.path.join(defaults._DATADIR, 'OW_fig4_Black.csv'),
        'ms79': os.path.join(defaults._DATADIR, 'MS1979.csv'),
    }

    # experiment parameters: center/surround probe coordinates and the
    # center-vs-surround angular offsets actually tested
    cpt = (_DEFAULT_TILTEFFECT_SIZE // 2, _DEFAULT_TILTEFFECT_SIZE // 2)
    spt = (_DEFAULT_TILTEFFECT_SIZE // 2,
           _DEFAULT_TILTEFFECT_SIZE // 2 + _DEFAULT_TILTEFFECT_CSIZE)
    dt_in = _DEFAULT_TILTEFFECT_CVAL - _DEFAULT_TILTEFFECT_SVALS

    # simulate populations: one stimulus per far-surround value
    im = sp.array([[
        stim.get_center_nfsurrounds(size=_DEFAULT_TILTEFFECT_SIZE,
                                    csize=_DEFAULT_TILTEFFECT_CSIZE,
                                    nsize=_DEFAULT_TILTEFFECT_CSIZE,
                                    fsize=_DEFAULT_TILTEFFECT_SSIZE,
                                    cval=_DEFAULT_TILTEFFECT_CVAL,
                                    nval=_DEFAULT_TILTEFFECT_CVAL,
                                    fval=sval,
                                    bgval=sp.nan)
    ] for sval in _DEFAULT_TILTEFFECT_SVALS])

    # get shifts for model for both papers, and from digitized data
    sortidx = sp.argsort(dt_in)  # re-order in increasing angular differences

    # O'Toole and Wenderoth (1977)
    _, ds_ow77_paper_y = sp.genfromtxt(_DEFAULT_TILTEFFECT_CSV['ow77'],
                                       delimiter=',').T

    extra_vars = {}
    extra_vars['scale'] = _DEFAULT_TILTEFFECT_SCALES['ow77']
    extra_vars['decoder'] = _DEFAULT_TILTEFFECT_DECODER_TYPE
    # BUG FIX: 'npoints' was assigned twice with the same value; once suffices.
    extra_vars['npoints'] = _DEFAULT_TILTEFFECT_NPOINTS
    extra_vars['cval'] = _DEFAULT_TILTEFFECT_CVAL
    extra_vars['sortidx'] = sortidx
    extra_vars['cpt'] = cpt
    extra_vars['spt'] = spt
    # BUG FIX: the original read `sval`, the list-comprehension variable,
    # which only leaks under Python 2 (NameError on Python 3). Its leaked
    # value was the last surround value, so use that explicitly.
    extra_vars['sval'] = _DEFAULT_TILTEFFECT_SVALS[-1]
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'f3a'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    optimize_model(im, ds_ow77_paper_y, extra_vars, defaults)
def run():
    """Run the Trott & Born (2015) center-surround tuning experiment (fig 1B)."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    csize = 5
    npoints = 64
    scale = 2.0
    neuron_theta = 0.50
    cval = 0.5
    # BUG FIX: `range(...) + ['CO']` raises TypeError on Python 3 (range is a
    # lazy sequence, not a list); wrap in list() — works on Python 2 as well.
    csvfiles = [defaults._DATADIR + '/TB2015_Fig1B_%s.csv' % (s,)
                for s in list(range(-90, 90, 30)) + ['CO']]

    # experiment parameters: center/surround angles in the (0, 1) domain
    cvals = (sp.arange(-90, 90, 30) + 90.) / 180.
    svals = sp.linspace(0.0, 1.0, 6).tolist() + [sp.nan]

    # index of the model unit whose preferred value is closest to neuron_theta
    neuron_thetas = sp.linspace(0.0, 1.0, npoints)
    neuron_idx = sp.argmin(sp.absolute(neuron_thetas - neuron_theta))

    # one stimulus per (center, surround) value combination
    stims = [
        stim.get_center_surround(size=size, csize=csize, cval=cv, sval=sv)
        for cv in cvals for sv in svals
    ]

    x = sp.array([
        model_utils.get_population(im,
                                   npoints=npoints,
                                   kind='circular',
                                   scale=scale,
                                   fdomain=(0, 1)) for im in stims
    ])

    # [Array shapes]
    # trott and born 2015 data
    gt = get_gt(csvfiles)

    extra_vars = {}
    extra_vars['scale'] = scale
    extra_vars['npoints'] = npoints
    extra_vars['cval'] = cval
    extra_vars['cvals'] = cvals
    extra_vars['svals'] = svals
    extra_vars['size'] = size
    extra_vars['csize'] = csize
    extra_vars['neuron_idx'] = neuron_idx
    extra_vars['figure_name'] = 'tbtcso'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    optimize_model(x, gt, extra_vars, defaults)
# Example #5
# 0
def run():
    """Run the cross-orientation suppression experiment (test + mask + surround)."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    npoints = 64
    cval1 = 0.25
    cval2 = 0.75
    sval = 0.75
    test_contrasts = sp.array([0., 8., 32.])
    mask_contrasts = sp.array([0., 8., 32.])

    # experiment parameters: population indices of the two grating values
    idx1 = int(cval1 * npoints)
    idx2 = int(cval2 * npoints)

    # simulate populations
    imc = stim.get_center_surround(size=size, csize=9, cval=cval1, sval=sp.nan)
    ims = stim.get_center_surround(size=size, csize=9, cval=sp.nan, sval=sval)
    x1 = utils.get_population(imc,
                              npoints=npoints,
                              kind='gaussian',
                              scale=0.1,
                              fdomain=(0, 1))
    # second grating: the same population profile rolled to cval2's position
    x2 = sp.roll(x1, int((cval2 - cval1) * npoints), axis=-3)
    xs = utils.get_population(ims,
                              npoints=npoints,
                              kind='gaussian',
                              scale=0.1,
                              fdomain=(0, 1))

    # every (test, mask) contrast pairing, then add the surround to each
    combos = [
        tc / 100. * x1 + mc / 100. * x2
        for tc in test_contrasts
        for mc in mask_contrasts
    ]
    x = sp.array(combos) + sp.array([xs])

    # Experimental data
    extra_vars = {
        'size': size,
        'npoints': npoints,
        'sval': sval,
        'figure_name': 'cross_orientation_suppression',
        'return_var': 'O',
        'idx1': idx1,
        'idx2': idx2,
        'test_contrasts': test_contrasts,
        'mask_contrasts': mask_contrasts,
    }
    optimize_model(x, [], extra_vars, defaults)
def run():
    """Run the size-tuning experiment: growing square patches at several contrasts."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    nr = 17
    npoints = 32 // 2
    ncontrasts = 5

    # experiment parameters
    # generate stimuli: value-0.5 squares of half-width 0..nr-1 centered on a
    # NaN background
    ##################
    half = size // 2
    stimuli = []
    for k in range(nr):
        frame = sp.zeros((size, size)) + sp.nan
        frame[half - k:half + k + 1, half - k:half + k + 1] = 0.5
        stimuli.append(frame)
    im = sp.array(stimuli)

    # generate populations, then tile them across the contrast levels
    ######################
    contrasts = sp.linspace(1., 0., ncontrasts, endpoint=False)[::-1]
    x = sp.array([
        utils.get_population(frame, 'gaussian', npoints=npoints)
        for frame in im
    ])
    cx = np.concatenate([c * x for c in contrasts], axis=0)

    # Experimental data
    extra_vars = {
        'size': size,
        'npoints': npoints,
        'nr': nr,
        'stimsizes': 2 * sp.arange(nr) + 1,
        'ssn': defaults._DEFAULT_PARAMETERS['ssn'],
        'ssf': defaults._DEFAULT_PARAMETERS['ssf'],
        'hp_file': os.path.join(defaults._FIGURES, 'best_hps.npz'),
        'figure_name': 'size_tuning',
        'return_var': 'O',
        'contrasts': contrasts,
        'curvecols': sns.cubehelix_palette(ncontrasts),
        'curvelabs': [
            'Single-cell response at contrast %g' % (cst, ) for cst in contrasts
        ],
    }
    optimize_model(cx, None, extra_vars, defaults)
def run(create_stim=True):
    """Extract VGG16 features for tilt-effect stimuli and fit the model.

    Parameters
    ----------
    create_stim : bool
        When True, regenerate the VGG16 feature stack from images on disk and
        cache it to 'raw_<layer>.npy'; when False, reload that cache.
    """
    defaults = PaperDefaults()

    # David's globals
    _DEFAULT_TILTEFFECT_DEGPERPIX = .25  # <OToole77>
    _DEFAULT_TILTEFFECT_SIZE = 51  # 101
    _DEFAULT_TILTEFFECT_CSIZE = iround(2. / _DEFAULT_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_CVAL = .5
    _DEFAULT_TILTEFFECT_SCALES = {'ow77': 0.40, 'ms79': 0.60}  # 0.45
    _DEFAULT_TILTEFFECT_NPOINTS = 25  # 100
    _DEFAULT_TILTEFFECT_DECODER_TYPE = 'circular_vote'

    # experiment parameters: center/surround probe coordinates
    cpt = (_DEFAULT_TILTEFFECT_SIZE // 2, _DEFAULT_TILTEFFECT_SIZE // 2)
    spt = (_DEFAULT_TILTEFFECT_SIZE // 2,
           _DEFAULT_TILTEFFECT_SIZE // 2 + _DEFAULT_TILTEFFECT_CSIZE)

    # simulate populations
    fl = 'conv2_2'
    if create_stim:
        sys.path.append('../../')
        from MIRC_tests import features_vgg16
        # NOTE(review): hard-coded local image path — parameterize before
        # running outside the original author's machine.
        im, im_names = features_vgg16.baseline_vgg16(
            images='/home/drew/Desktop/nrsa_png',
            num_images=25,
            feature_layer='content_vgg/' + fl + '/Relu:0',
            im_ext='.png',
            batch_size=1)
        target_size = [
            400, 400
        ]  # [_DEFAULT_TILTEFFECT_SIZE, _DEFAULT_TILTEFFECT_SIZE]
        im = normalize(resize(im, target_size=target_size).astype(float))
        np.save('raw_' + fl, im)
    else:
        # BUG FIX: `im` was undefined when create_stim=False, so the final
        # optimize_model call crashed; reload the cached feature stack instead.
        im = np.load('raw_' + fl + '.npy')

    extra_vars = {}
    extra_vars['scale'] = _DEFAULT_TILTEFFECT_SCALES['ow77']
    extra_vars['decoder'] = _DEFAULT_TILTEFFECT_DECODER_TYPE
    # BUG FIX: 'npoints' was assigned twice with the same value; once suffices.
    extra_vars['npoints'] = _DEFAULT_TILTEFFECT_NPOINTS
    extra_vars['cval'] = _DEFAULT_TILTEFFECT_CVAL
    extra_vars['cpt'] = cpt
    extra_vars['spt'] = spt
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'cnn_features'
    extra_vars['return_var'] = 'O'
    extra_vars['save_file'] = 'proc_' + fl
    optimize_model(im, None, extra_vars, defaults)
# Example #8
# 0
from ops.db_utils import init_db, generate_combos, create_and_execute_daemons, prepare_settings
from ops.parameter_defaults import PaperDefaults
import sys

# Initialize the hyperparameter-search database and seed it with combos.
# Usage: <script> <db_name> <num_combos>
defaults = PaperDefaults()
# FIX: use print() calls (valid on both Python 2 and Python 3) instead of
# Python-2-only print statements.
print('Initializing database')
init_db(sys.argv[1])
print('Generating initial ' + sys.argv[2] + ' hyperparameter combos')
generate_combos(int(sys.argv[2]))
def run():
    """Run the KW97 tilt-effect experiment with near/far surrounds (figure 3b)."""
    defaults = PaperDefaults()

    # David's globals
    _DEFAULT_KW97_TILTEFFECT_DEGPERPIX = .45  # <OToole77>
    _DEFAULT_TILTEFFECT_SIZE = 101  #101
    _DEFAULT_KW97_TILTEFFECT_CSIZE = iround(3.6 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_KW97_TILTEFFECT_NSIZE = iround(5.4 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_KW97_TILTEFFECT_FSIZE = iround(10.7 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_CVAL = .5
    _DEFAULT_TILTEFFECT_SVALS = np.linspace(0.0, 0.5, 10)
    _DEFAULT_KW97_TILTEFFECT_SCALE = 1.25
    _DEFAULT_TILTEFFECT_NPOINTS = 25  #100
    _DEFAULT_TILTEFFECT_DECODER_TYPE = 'circular_vote'
    # (removed unused `_DEFAULT_TILTEFFECT_CIRCULAR = True`)
    csvfiles = [
        os.path.join(defaults._DATADIR, 'KW97_GH.csv'),
        os.path.join(defaults._DATADIR, 'KW97_JHK.csv'),
        os.path.join(defaults._DATADIR, 'KW97_LL.csv'),
        os.path.join(defaults._DATADIR, 'KW97_SJL.csv'),
    ]

    # experiment parameters: probe coordinates and tested angular offsets
    cpt = (_DEFAULT_TILTEFFECT_SIZE // 2, _DEFAULT_TILTEFFECT_SIZE // 2)
    spt = (_DEFAULT_TILTEFFECT_SIZE // 2,
           _DEFAULT_TILTEFFECT_SIZE // 2 + _DEFAULT_KW97_TILTEFFECT_CSIZE)
    dt_in = _DEFAULT_TILTEFFECT_CVAL - _DEFAULT_TILTEFFECT_SVALS

    # simulate populations: one stimulus per far-surround value
    im = sp.array([[
        stim.get_center_nfsurrounds(size=_DEFAULT_TILTEFFECT_SIZE,
                                    csize=_DEFAULT_KW97_TILTEFFECT_CSIZE,
                                    nsize=_DEFAULT_KW97_TILTEFFECT_NSIZE,
                                    fsize=_DEFAULT_KW97_TILTEFFECT_FSIZE,
                                    cval=_DEFAULT_TILTEFFECT_CVAL,
                                    nval=sp.nan,
                                    fval=sval,
                                    bgval=sp.nan)
    ] for sval in _DEFAULT_TILTEFFECT_SVALS])

    # get shifts for model for both papers, and from digitized data
    sortidx = sp.argsort(dt_in)  # re-order in increasing angular differences

    # Digitized per-subject data from the KW97 CSVs
    # (the original comment misattributed this to O'Toole & Wenderoth 1977)
    n_subjects = len(csvfiles)
    ds_kw97_paper_x = sp.zeros((n_subjects, 9))
    ds_kw97_paper_y = sp.zeros((n_subjects, 9))

    for sidx, csv in enumerate(csvfiles):
        ds_kw97_paper_x[sidx], ds_kw97_paper_y[sidx] = \
            sp.genfromtxt(csv, delimiter=',').T

    ds_kw97_paper_x = (ds_kw97_paper_x + 360.) % 360. - 45.
    ds_kw97_paper_y = 45. - ds_kw97_paper_y

    # sort each subject's x-values into increasing order
    for sidx in range(n_subjects):
        ds_kw97_paper_x[sidx] = ds_kw97_paper_x[sidx][sp.argsort(
            ds_kw97_paper_x[sidx])]

    extra_vars = {}
    extra_vars['scale'] = _DEFAULT_KW97_TILTEFFECT_SCALE
    extra_vars['decoder'] = _DEFAULT_TILTEFFECT_DECODER_TYPE
    # BUG FIX: 'npoints' was assigned twice with the same value; once suffices.
    extra_vars['npoints'] = _DEFAULT_TILTEFFECT_NPOINTS
    extra_vars['cval'] = _DEFAULT_TILTEFFECT_CVAL
    extra_vars['sortidx'] = sortidx
    extra_vars['cpt'] = cpt
    extra_vars['spt'] = spt
    # BUG FIX: the original read `sval`, the list-comprehension variable,
    # which only leaks under Python 2 (NameError on Python 3). Its leaked
    # value was the last surround value, so use that explicitly.
    extra_vars['sval'] = _DEFAULT_TILTEFFECT_SVALS[-1]
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'f3b'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    # subject-averaged curve resampled onto the 10 surround values
    adjusted_gt = signal.resample(np.mean(ds_kw97_paper_y, axis=0), 10)
    optimize_model(im, adjusted_gt, extra_vars, defaults)
import sys
import numpy as np
import tensorflow as tf
import model_utils
from copy import deepcopy
from timeit import default_timer as timer
from model_defs.model_cpu_port_scan_optim import ContextualCircuit, _sgw, _sdw
import scipy as sp
from ops.db_utils import update_data, get_lesion_rows_from_db, count_sets
from model_utils import iceil
from ops.parameter_defaults import PaperDefaults

# Module-level snapshot of the paper's default configuration as a plain dict.
defaults = PaperDefaults().__dict__


def adjust_parameters(defaults, hps):
    """Override entries of ``defaults._DEFAULT_PARAMETERS`` with values in *hps*.

    Parameters
    ----------
    defaults : object
        Object exposing a dict attribute ``_DEFAULT_PARAMETERS``; mutated
        in place.
    hps : dict
        Hyperparameter overrides keyed by parameter name.

    Returns
    -------
    The same *defaults* object, with overridden parameters.
    """
    # BUG FIX: the original iterated `.iteritems()` — yielding (key, value)
    # tuples — and tested those tuples against the key list, so no parameter
    # was ever overridden. Iterate the keys themselves instead.
    for k in list(defaults._DEFAULT_PARAMETERS):
        if k in hps:
            defaults._DEFAULT_PARAMETERS[k] = hps[k]
    return defaults


def printProgress(iteration,
                  total,
                  prefix='',
                  suffix='',
                  decimals=1,
                  bar_length=100):
    """Format progress of *iteration* out of *total* steps as a percentage.

    NOTE(review): only the percentage computation is visible here — the body
    appears truncated (prefix/suffix/bar_length are unused so far). Confirm
    the remainder of the implementation before relying on this function.
    """
    # e.g. decimals=1 -> "{0:.1f}"
    str_format = "{0:." + str(decimals) + "f}"
    # float() guards against Python-2 integer division
    percents = str_format.format(100 * (iteration / float(total)))
# Example #11
# 0
def run(make_stims=False):
    """Run the Shevell & Monnier (2003) color-shift experiment (figure 7).

    Parameters
    ----------
    make_stims : bool
        When True, only build the stimuli; otherwise run the optimization.
    """
    defaults = PaperDefaults()

    #David's globals
    test_colors = ['orange', 'turquoise']
    size = 77
    csize = 3
    ssize = None
    _MINPERPIX = 2.
    _DEFAULT_SM2003_CSIZE_MIN = 6.0
    _DEFAULT_SM2003_SIZE_MIN = 153.0
    _DEFAULT_SM2003_CPDS = sp.array([0., 1., 2., 3.3, 5., 10.])  # <from paper>
    _DEFAULT_SM2003_CPMS = _DEFAULT_SM2003_CPDS / 60.
    _DEFAULT_SM2003_CPSS = iround(1 + _DEFAULT_SM2003_CPMS * \
        (_DEFAULT_SM2003_SIZE_MIN - _DEFAULT_SM2003_CSIZE_MIN)/2.0) # <realistic>
    _DEFAULT_SM2003_CSIZE = iround(_DEFAULT_SM2003_CSIZE_MIN / _MINPERPIX)
    cpss = _DEFAULT_SM2003_CPSS

    #Use color parameters
    defaults._DEFAULT_PARAMETERS['continuous'] = False
    defaults._DEFAULT_PARAMETERS['srf'] = _DEFAULT_SM2003_CSIZE

    # digitized data: per-observer, phase (PL) and antiphase (LP) inducers
    csvfiles = {
        'ObsML': {
            'PL': defaults._DATADIR + '/SM2003_Fig5_ObsML_PL.csv',
            'LP': defaults._DATADIR + '/SM2003_Fig5_ObsML_LP.csv',
        },
        'ObsPM': {
            'PL': defaults._DATADIR + '/SM2003_Fig5_ObsPM_PL.csv',
            'LP': defaults._DATADIR + '/SM2003_Fig5_ObsPM_LP.csv',
        },
    }

    _DEFAULT_SM2003_COLORS_RGB = {
        'orange': sp.array([.99, .65, .47]),
        'purple': sp.array([.65, .54, .77]),
        # adjusted to have Y=.30 #sp.array([.60, .49, .71]),
        'purple_alt': sp.array([.60, .48, .70]),
        'lime': sp.array([.56, .60, .49]),
        # adjusted to have Y=.30 #sp.array([.61, .65, .53]),
        'lime_alt': sp.array([.51, .61, .53]),
        'turquoise': sp.array([.44, .78, .73]),
        'neutral': sp.array([.66, .66, .66]),
        'lilac': sp.array([.89, .62, .80]),
        'citrus': sp.array([.67, 1.0, .50]),
        'EEW': sp.array([.58, .58, .58])
    }

    # Shevell & Monnier (2003), figure 5
    # (the original comment misattributed this to O'Toole & Wenderoth 1977)
    # load digitzed data from original paper
    ########################################
    shift_phase_paper = sp.array([
        sp.genfromtxt(csvfiles['ObsML']['PL'], delimiter=',').T[1],
        sp.genfromtxt(csvfiles['ObsPM']['PL'], delimiter=',').T[1]
    ])
    shift_anti_paper = sp.array([
        sp.genfromtxt(csvfiles['ObsML']['LP'], delimiter=',').T[1],
        sp.genfromtxt(csvfiles['ObsPM']['LP'], delimiter=',').T[1]
    ])
    shift_phase_paper = np.mean(shift_phase_paper, axis=0)  #double check this
    shift_anti_paper = np.mean(shift_anti_paper, axis=0)
    gt = np.vstack((shift_phase_paper, shift_anti_paper))

    #Also preload regs for postprocessing
    regpath = os.path.join(defaults._WORKDIR,\
        'ShevellMonnier2003.reg.pkl.std.srf%issn%issf%i' \
            % (defaults._DEFAULT_PARAMETERS['srf'], defaults._DEFAULT_PARAMETERS['ssn'], defaults._DEFAULT_PARAMETERS['ssf']))

    # PERF FIX: the original called joblib.load(regpath) eight times,
    # re-reading and unpickling the same file for every key; load once.
    regs = joblib.load(regpath)
    reg_X_SO = regs['reg_X_SO']
    reg_Y_SO = regs['reg_Y_SO']
    reg_Z_SO = regs['reg_Z_SO']
    scaler_SO = regs['scaler_SO']

    reg_X_SX = regs['reg_X_SX']
    reg_Y_SX = regs['reg_Y_SX']
    reg_Z_SX = regs['reg_Z_SX']
    scaler_SX = regs['scaler_SX']

    so2image = cutils.get_XYZ2RGB_predictor(reg_X_SO, reg_Y_SO, reg_Z_SO,
                                            scaler_SO)
    sx2image = cutils.get_XYZ2RGB_predictor(reg_X_SX, reg_Y_SX, reg_Z_SX,
                                            scaler_SX)

    #Add to a dictionary
    extra_vars = {}
    extra_vars['n_cps'] = len(cpss)
    extra_vars['cpss'] = cpss
    extra_vars['test_colors'] = test_colors
    extra_vars['_DEFAULT_SM2003_COLORS_RGB'] = _DEFAULT_SM2003_COLORS_RGB
    extra_vars['size'] = size
    extra_vars['csize'] = csize
    extra_vars['ssize'] = ssize
    extra_vars['kind'] = 'circular'
    extra_vars['so2image'] = so2image
    extra_vars['sx2image'] = sx2image
    extra_vars['n_col'] = len(test_colors)
    extra_vars['figure_name'] = 'f7'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    # measure shift for phase & antiphase inducers, for each test
    if make_stims:
        create_stimuli(gt, extra_vars, defaults)
    else:
        optimize_model(gt, extra_vars, defaults)
# Example #12
# 0
def run():
    """Run the disparity-flanker experiment against Westheimer & Levi 1987 data."""
    defaults = PaperDefaults()

    # David's globals
    size = 51
    mpp = 0.76  # 0.76 # 1.11
    scale = 0.23  # 0.23 # 0.22
    csv_file_x = os.path.join(defaults._DATADIR, 'WL1987_corrected_X.csv')
    csv_file_y = os.path.join(defaults._DATADIR, 'WL1987_corrected_Y.csv')

    # experiment parameters
    dd = (-150., 150.)  # in seconds of arc

    def sec2u(s):
        # seconds of arc -> unit interval
        return (s - dd[0]) / (dd[1] - dd[0])

    def u2sec(u):
        # unit interval -> seconds of arc
        return u * (dd[1] - dd[0]) + dd[0]

    def min2pix(m):
        # minutes of arc -> pixels
        return iround(m / float(mpp))

    npoints = 50
    ndists = 10
    dists = sp.linspace(0.0, 12., ndists)
    lh, lw = 1, 4.
    ph, pw = 2., 2.
    center_disp = 0.0
    flanker_disp = -33.3
    mp0 = size // 2

    # Need to scale up the ecrfs
    for key in ('srf', 'ssn', 'ssf'):
        defaults._DEFAULT_PARAMETERS[key] = \
            defaults._DEFAULT_PARAMETERS[key] * 2 - 1

    # simulate populations
    im = get_wl87_stim(size=size,
                       dists=min2pix(dists),
                       cval=sec2u(center_disp),
                       sval=sec2u(flanker_disp),
                       ch=min2pix(lh),
                       cw=min2pix(lw),
                       sh=min2pix(ph),
                       sw=min2pix(pw))

    # Get ground truth data and fit a spline through the subject average
    paper_data_x = sp.genfromtxt(csv_file_x, delimiter=',')
    paper_data_y = sp.genfromtxt(csv_file_y, delimiter=',') * -1
    paper_fit_y = sfit(sp.linspace(dists.min(), dists.max(), 100),
                       paper_data_x,
                       sp.nanmean(paper_data_y, axis=0),
                       k=2,
                       t=[5.])
    # subsample the dense fit back down to the ndists tested distances
    keep = np.round(
        np.linspace(0, paper_fit_y.shape[0] - 1, ndists)).astype(int)
    paper_fit_y = paper_fit_y[keep]

    extra_vars = {
        'scale': scale,
        'kind': 'gaussian',
        'decoder': 'circular_vote',
        'npoints': npoints,
        'cval': sec2u(center_disp),
        'sval': sec2u(flanker_disp),
        'figure_name': 'f5',
        'u2sec': u2sec,
        'min2pix': min2pix,
        'dists': dists,
        'flanker_disp': flanker_disp,
        'mp0': mp0,
        'lh': lh,
        'pw': pw,
        'size': size,
        'gt_x': paper_data_x,
        'return_var': 'O',
    }

    optimize_model(im, paper_fit_y, extra_vars, defaults)
# Example #13
# 0
def run():
    """Run the Murakami & Shimojo (1996) motion-coherence experiment (figure 6)."""
    defaults = PaperDefaults()

    #David's globals
    # NOTE: the second assignment deliberately overrides the first — the two
    # lines are a parameter-set toggle ('paper' vs 'v2').
    _DEFAULT_MURAKAMISHIMOJO96_DPP = 1.0/10.    # _PARAMETER_SET_VERSION == 'paper'
    _DEFAULT_MURAKAMISHIMOJO96_DPP = .125       # _PARAMETER_SET_VERSION == 'v2'

    _DEFAULT_MURAKAMISHIMOJO96_SIZE = 201 # 51 <works>
    _DEFAULT_MURAKAMISHIMOJO96_NTRIALS = 25 # 20 <works>
    _DEFAULT_MURAKAMISHIMOJO96_WS = sp.unique(sp.around(
        1.0 / _DEFAULT_MURAKAMISHIMOJO96_DPP * \
        sp.array([0.5, 0.75, 1., 1.5, 2., 3., 4., 5., 8.]))) # <paper> <FULL3_LARGE>
    _DEFAULT_MURAKAMISHIMOJO96_RDD = 0.50 # <works> | sp.pi**2/72 # <paper>
    _DEFAULT_MURAKAMISHIMOJO96_NUNITS = 32
    _DEFAULT_MURAKAMISHIMOJO96_NDIRS = 10
    _DEFAULT_MURAKAMISHIMOJO96_NCOH = 21
    _DEFAULT_MURAKAMISHIMOJO96_NCOH4FIT = 101
    _DEFAULT_MURAKAMISHIMOJO96_SCALE = 1.0
    _DEFAULT_MURAKAMISHIMOJO96_VALUE_UP = .80
    _DEFAULT_MURAKAMISHIMOJO96_VALUE_DOWN = .30
    _DEFAULT_MURAKAMISHIMOJO96_CSVS = {
        '%s' % (subj,): [os.path.join(defaults._DATADIR, 'MS96_%s_%g.csv') % (subj, deg) \
    for deg in [0, 2, 3, 4.5, 6, 9]] for subj in ['IM', 'SM']}

    # experiment parameters: pool digitized PSE curves across subjects
    w_paper, PSE_paper = [], []
    w_paper_subs, PSE_paper_subs = [], []
    for idx, subject in enumerate(_DEFAULT_MURAKAMISHIMOJO96_CSVS.values()):
        for jdx, csvfile in enumerate(subject):
            w_, pse_ = sp.genfromtxt(csvfile, delimiter=',').T
            w_paper += w_.tolist()
            PSE_paper += pse_.tolist()
            w_paper_subs.append(w_)
            PSE_paper_subs.append(pse_)

    # BUG FIX: the accumulator must have max_size columns. The original used
    # len(PSE_paper_subs[0]), which breaks whenever the first subject's curve
    # is not the longest — every curve is resampled to max_size below.
    max_size = np.max(np.asarray([len(x) for x in PSE_paper_subs]))
    gt_data = np.zeros((len(PSE_paper_subs), max_size))

    #Assign to 9 bins np.concatenate(w_paper_subs)
    #Take the average within each bin as the GT
    for idx, g in enumerate(PSE_paper_subs):
        if len(g) < max_size:
            g = signal.resample(g, max_size)
        gt_data[idx, :] = g
    # subject-averaged PSE curve is the ground truth
    gt = np.mean(gt_data, axis=0)

    extra_vars = {}
    extra_vars['ws'] = _DEFAULT_MURAKAMISHIMOJO96_WS
    extra_vars['rdd'] = _DEFAULT_MURAKAMISHIMOJO96_RDD
    extra_vars['size'] = _DEFAULT_MURAKAMISHIMOJO96_SIZE
    extra_vars['ntrials'] = _DEFAULT_MURAKAMISHIMOJO96_NTRIALS
    extra_vars['nunits'] = _DEFAULT_MURAKAMISHIMOJO96_NUNITS
    extra_vars['ncoh'] = _DEFAULT_MURAKAMISHIMOJO96_NCOH
    extra_vars['scale'] = _DEFAULT_MURAKAMISHIMOJO96_SCALE
    extra_vars['ndirs'] = _DEFAULT_MURAKAMISHIMOJO96_NDIRS
    extra_vars['value_up'] = _DEFAULT_MURAKAMISHIMOJO96_VALUE_UP
    extra_vars['value_down'] = _DEFAULT_MURAKAMISHIMOJO96_VALUE_DOWN
    extra_vars['ncoh4fit'] = _DEFAULT_MURAKAMISHIMOJO96_NCOH4FIT
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'f6'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    optimize_model(gt, extra_vars, defaults)
# Example #14
# 0
import sys
import numpy as np
import scipy as sp
from scipy import stats
import tensorflow as tf
from copy import deepcopy
sys.path.append('../')
from ops.parameter_defaults import PaperDefaults
from tf_helper_functions import *
from collections import namedtuple

# Expose every paper default as a module-level global so helpers below can
# reference them by bare name.
defaults = PaperDefaults().__dict__
module = sys.modules[__name__]  # add to the global namespace
for name, value in defaults.iteritems():
    setattr(module, name, value)
# Immutable record type mirroring the default hyperparameter keys.
# NOTE(review): namedtuple's `verbose` keyword was removed in Python 3.7;
# this line (like `iteritems` above) assumes a Python 2 interpreter.
CircuitParameters = namedtuple('CircuitParameters',
                               defaults['_DEFAULT_PARAMETERS'].keys(),
                               verbose=False,
                               rename=False)
# Pristine copy of the defaults, safe to mutate per-run without side effects.
_DEFAULT_PARAMETERS_TEMPLATE = deepcopy(defaults['_DEFAULT_PARAMETERS'])


def sampler(x):
    """Randomly perturb *x*: |U(x-1, x+1) + x| raised to a U(-2, 2) power."""
    base = abs(np.random.uniform(low=x - 1, high=x + 1) + x)
    exponent = np.random.uniform(low=-2., high=2.)  # previously did [0, 2]
    return base ** exponent


def makeGaussian(size, fwhm=3, center=None):
    """ Make a square gaussian kernel.
    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
Exemple #15
0
def run(initialize_model=False):
    """Run the KW2015 color center-surround experiment (figure 4).

    Parameters
    ----------
    initialize_model : bool
        When True, regenerate the stimulus file via create_stims() before
        loading it.
    """
    defaults = PaperDefaults()

    # David's globals: single-opponent Gabor filter-bank configuration
    _DEFAULT_KW2015_SO_PARAMETERS = {
        'filters': {
            'name': 'gabors',
            'aspect_ratio': .6,
            'sizes': sp.array([9]),
            'spatial_frequencies': sp.array([[9.0]]),
            'orientations': sp.arange(2) * sp.pi / 2,
            'phases': sp.array([0]),
            'with_center_surround': False,
            'padding': 'reflect',
            'corr': False,
            'ndp': False
        },
        'model': {
            'channels_so': ('R+G-', 'B+Y-', 'R+C-', 'Wh+Bl-', 'G+R-', 'Y+B-',
                            'C+R-', 'Bl+Wh-'),
            'normalize':
            False
        },
        'dnp_so': None,
        'selected_channels': [0, 1, 3, 4, 5, 7],
        'norm_channels': [0, 1, 3, 4, 5, 7]
    }

    size = 51
    csize = 9
    n_train = 32
    n_t_hues = 16   # number of test (center) hues
    n_s_hues = 16   # number of surround hues
    # one digitized CSV per surround hue, in 45-degree steps
    csvfiles = [
        defaults._DATADIR + '/KW2015_%i.csv' % (i, )
        for i in range(0, 360, 45)
    ]

    # Load digitized data from the paper (first row of each CSV is skipped)
    kw2015_fig2_x = sp.zeros((len(csvfiles), 16))
    kw2015_fig2_y = sp.zeros((len(csvfiles), 16))
    for idx, csv in enumerate(csvfiles):
        kw2015_fig2_x[idx], kw2015_fig2_y[idx] = \
            sp.genfromtxt(csv, delimiter=',')[1:].T

    # experiment stimuli
    extra_vars = {}
    extra_vars['_DEFAULT_KW2015_SO_PARAMETERS'] = _DEFAULT_KW2015_SO_PARAMETERS
    extra_vars['_DEFAULT_FLOATX_NP'] = defaults._DEFAULT_FLOATX_NP
    extra_vars['size'] = size
    extra_vars['csize'] = csize
    extra_vars['n_train'] = n_train
    extra_vars['n_t_hues'] = n_t_hues
    extra_vars['n_s_hues'] = n_s_hues
    extra_vars['figure_name'] = 'f4'
    extra_vars['gt_x'] = kw2015_fig2_x
    extra_vars['f4_stimuli_file'] = defaults.f4_stimuli_file
    extra_vars['return_var'] = 'I'
    extra_vars['precalculated_x'] = True
    extra_vars['aux_y'] = []
    extra_vars['percent_reg_train'] = 80.

    # optionally (re)build the cached stimulus file, then load it
    if initialize_model:
        create_stims(extra_vars)
    stim_files = np.load(extra_vars['f4_stimuli_file'])
    extra_vars['stims_all_lms'] = stim_files['stims_all_lms']

    #Run model
    #cx.run(so_all, from_gpu=False)
    #sx_all[:] = cx.Y.get()[:, :, size//2, size//2]
    # ground truth: hue shift averaged over surround hues
    adj_gt = np.mean(kw2015_fig2_y, axis=0)
    # per-stimulus single-opponent responses, flattened over (test, surround)
    im = stim_files['so_ind'].reshape(
        n_t_hues * n_s_hues,
        len(_DEFAULT_KW2015_SO_PARAMETERS['norm_channels']), size, size)
    extra_vars['aux_data'] = stim_files['so_all'].transpose(0, 2, 3, 1)
    extra_vars['cs_hue_diff'] = stim_files['cs_hue_diff']

    optimize_model(im, adj_gt, extra_vars, defaults)
    zs = np.zeros((len(lesion_data)))
    mus = np.zeros((len(lesion_data)))
    for idx, row in enumerate(lesion_data):
        figure_fits = [row[k] for k in defaults.db_problem_columns]
        # if None in figure_fits:
        #   raise TypeError
        # summarize with a z score
        zs[idx] = np.nanmean(figure_fits) / np.nanstd(figure_fits)
        mus[idx] = np.nanmax(figure_fits)
    if len(zs) > 0:
        return lesion_data[np.argmax(zs)], lesion_data, zs, mus
    else:
        return None, None, None, None


# Collect per-lesion results from the database and keep, for each lesion,
# the best-scoring row plus the full score distributions.
defaults = PaperDefaults()
max_row = {}      # lesion -> best row (as dict) or None
lesion_dict = {}  # lesion -> all rows
score_dict = {}   # lesion -> z-scores per row
mus = {}          # lesion -> per-row summary maxima
for lesion in defaults.lesions:
    lesion_data = get_all_lesion_data(lesion)
    it_max, lesion_dict[lesion], score_dict[lesion], mus[
        lesion] = find_best_fit(lesion_data, defaults)
    if it_max is not None:
        it_max = dict(it_max)
    max_row[lesion] = it_max
    # bootstrap here
# best z-score achieved per lesion (Python 2 `iteritems`)
max_zs = {k: v.max() for k, v in score_dict.iteritems()}
# Reorder the dimensions
t_stats = {}