    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0):
        super().__init__(dt=dt)
        self.choices = [1, 2]
        self.cues = [0, 1]

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'sample': 500,
            'delay1': 500,
            'cue1': 500,
            'test1': 500,
            'delay2': 500,
            'cue2': 500,
            'test2': 500}
        if timing:
            self.timing.update(timing)

        self.abort = False

        name = {'fixation': 0, 'stimulus1': range(1, 3),
                'stimulus2': range(3, 5), 'cue1': 5, 'cue2': 6}
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(7,),
                                            dtype=np.float32, name=name)
        name = {'fixation': 0, 'match': 1, 'non-match': 2}
        self.action_space = spaces.Discrete(3, name=name)
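All of these constructors share the same pattern: default rewards and timing dictionaries that are merged with caller-supplied overrides via dict.update, so only the keys the caller passes are replaced. A minimal sketch of how the overrides combine (the class name DelayMatchTask is illustrative, not defined above):

    # Hypothetical task built with the constructor above
    env = DelayMatchTask(dt=100,
                         rewards={'abort': -0.2},
                         timing={'delay1': 800, 'cue1': 250})
    print(env.rewards)             # {'abort': -0.2, 'correct': 1.0, 'fail': 0.0}
    print(env.timing['fixation'])  # 500 (default, untouched by the override)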
Example #2
    def __init__(self, dt=80, rewards=None, timing=None, gain=1,
                 prod_margin=0.2):
        super().__init__(dt=dt)
        self.prod_margin = prod_margin

        self.gain = gain

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 100,
            'ready': 83,
            'measure': lambda: self.rng.uniform(800, 1500),
            'set': 83}
        if timing:
            self.timing.update(timing)

        self.abort = False
        # set action and observation space
        name = {'fixation': 0, 'ready': 1, 'set': 2}
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(3,),
                                            dtype=np.float32, name=name)

        name = {'fixation': 0, 'go': 1}
        self.action_space = spaces.Discrete(2, name=name)  # (fixate, go)
Example #3
    def __init__(self, dt=100, anti=True, rewards=None, timing=None,
                 dim_ring=16, reaction=False):
        super().__init__(dt=dt)

        self.anti = anti
        self.reaction = reaction

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'stimulus': 500,
            'delay': 0,
            'decision': 500}
        if timing:
            self.timing.update(timing)

        self.abort = False

        # action and observation spaces
        self.dim_ring = dim_ring
        self.theta = np.arange(0, 2 * np.pi, 2 * np.pi / dim_ring)
        self.choices = np.arange(dim_ring)

        name = {'fixation': 0, 'stimulus': range(1, dim_ring + 1)}
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(1+dim_ring,), dtype=np.float32, name=name)

        name = {'fixation': 0, 'choice': range(1, dim_ring + 1)}
        self.action_space = spaces.Discrete(1+dim_ring, name=name)
Example #4
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1):
        super().__init__(dt=dt)
        # Possible decisions at the end of the trial
        self.choices = [1, 2]  # e.g. [left, right]
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Optional rewards dictionary
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        # Optional timing dictionary
        # if provided, self.add_period can infer timing directly
        self.timing = {
            'fixation': 100,
            'stimulus': 2000,
            'delay': 0,
            'decision': 100
        }
        if timing:
            self.timing.update(timing)

        # Similar to gym envs, define observation_space and action_space
        # Optional annotation of the observation space
        name = {'fixation': 0, 'stimulus': [1, 2]}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(3, ),
                                            dtype=np.float32,
                                            name=name)
        # Optional annotation of the action space
        name = {'fixation': 0, 'choice': [1, 2]}
        self.action_space = spaces.Discrete(3, name=name)
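The comments in the constructor above note that self.add_period can consume the timing dictionary directly. Below is a hedged sketch of the trial-construction method that typically pairs with such a constructor; it assumes the neurogym TrialEnv helpers add_period, add_ob, add_randn and set_groundtruth, and the exact signatures are my reading of the library rather than something shown in the snippet:

    def _new_trial(self, **kwargs):
        # Sample the trial condition
        trial = {'ground_truth': self.rng.choice(self.choices),
                 'coh': self.rng.choice([6.4, 12.8, 25.6, 51.2])}
        trial.update(kwargs)

        # Periods are added in order; their durations come from self.timing
        self.add_period(['fixation', 'stimulus', 'delay', 'decision'])

        # Fixation cue stays on until the decision period
        self.add_ob(1, period=['fixation', 'stimulus', 'delay'], where='fixation')
        # The two stimulus units receive signal plus noise during 'stimulus'
        stim = np.array([0.5, 0.5])
        stim[trial['ground_truth'] - 1] += trial['coh'] / 100
        self.add_ob(stim, 'stimulus', where='stimulus')
        self.add_randn(0, self.sigma, 'stimulus')

        # The correct action during 'decision' is the chosen side (1 or 2)
        self.set_groundtruth(trial['ground_truth'], period='decision')
        return trial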
Example #5
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0):
        super().__init__(dt=dt)
        self.choices = [0, 1]
        # trial conditions
        self.pairs = [(1, 3), (1, 4), (2, 3), (2, 4)]
        self.association = 0  # GO if np.diff(self.pair)[0]%2==self.association
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise
        # Durations (stimulus duration will be drawn from an exponential)

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': -1., 'miss': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 0,
            'stim1': 1000,
            'delay_btw_stim': 1000,
            'stim2': 1000,
            'delay_aft_stim': 1000,
            'decision': 500
        }
        if timing:
            self.timing.update(timing)

        self.abort = False
        # action and observation spaces
        name = {'fixation': 0, 'stimulus': range(1, 5)}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(5, ),
                                            dtype=np.float32,
                                            name=name)

        self.action_space = spaces.Discrete(2, name={'fixation': 0, 'go': 1})
Example #6
    def __init__(self, dt=100, rewards=None, timing=None, dim_ring=16):
        super().__init__(dt=dt)
        # Rewards
        self.rewards = {'correct': +1., 'fail': -0.1}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {'fixation': 500, 'reach': 500}
        if timing:
            self.timing.update(timing)

        # action and observation spaces
        name = {
            'self': range(dim_ring, 2 * dim_ring),
            'target': range(dim_ring)
        }
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(2 * dim_ring, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'left': 1, 'right': 2}
        self.action_space = spaces.Discrete(3, name=name)

        self.theta = np.arange(0, 2 * np.pi, 2 * np.pi / dim_ring)
        self.state = np.pi
        self.dim_ring = dim_ring
Example #7
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0,
                 dim_ring=2):
        super().__init__(dt=dt)
        self.choices = ['match', 'non-match']  # match, non-match

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'sample': 650,
            'first_delay': 1000,
            'test': 650}

        if timing:
            self.timing.update(timing)

        self.abort = False

        self.theta = np.linspace(0, 2 * np.pi, dim_ring + 1)[:-1]

        name = {'fixation': 0, 'stimulus': range(1, dim_ring + 1)}
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(1 + dim_ring,), dtype=np.float32, name=name)

        name = {'fixation': 0, 'match': 1, 'non-match': 2}
        self.action_space = spaces.Discrete(3, name=name)
Example #8
    def __init__(self, dt=80, rewards=None, timing=None):
        super().__init__(dt=dt)
        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 300,
            'stim1': lambda: self.rng.uniform(300, 600),
            'delay1': lambda: self.rng.uniform(800, 1500),
            'stim2': lambda: self.rng.uniform(300, 600),
            'delay2': 500,
            'decision': 300
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        name = {'fixation': 0, 'stim1': 1, 'stim2': 2}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(3, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'choice1': 1, 'choice2': 2}
        self.action_space = spaces.Discrete(3, name=name)
Example #9
    def __init__(self, dt=100, rewards=None, timing=None):
        super().__init__(dt=dt)
        # Actions (fixate, go)
        self.actions = [0, 1]
        # trial conditions
        self.choices = [0, 1]

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': -0.5, 'miss': -0.5}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 0,
            'stimulus': 500,
            'delay': 500,
            'decision': 500}
        if timing:
            self.timing.update(timing)

        self.abort = False
        # set action and observation spaces
        name = {'fixation': 0, 'nogo': 1, 'go': 2}
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(3,),
                                            dtype=np.float32, name=name)
        self.action_space = spaces.Discrete(2, name={'fixation': 0, 'go': 1})
Example #10
    def __init__(self, dt=80, rewards=None, timing=None, prod_margin=0.2):
        super().__init__(dt=dt)

        self.prod_margin = prod_margin

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': ngym.random.TruncExp(400, 100, 800, rng=self.rng),
            'target': ngym.random.TruncExp(1000, 500, 1500, rng=self.rng),
            's1': 100,
            'interval1': (600, 700, 800, 900, 1000),
            's2': 100,
            'interval2': 0,
            's3': 100,
            'interval3': 0,
            'response': 1000
        }
        if timing:
            self.timing.update(timing)

        self.abort = False
        # set action and observation space
        name = {'fixation': 0, 'stimulus': 1, 'target': 2}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(3, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'go': 1}
        self.action_space = spaces.Discrete(2, name=name)
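The timing values used across these examples take several forms: a fixed number of milliseconds, a zero-argument callable re-sampled every trial, an ngym.random.TruncExp object, or a tuple of candidate durations. A sketch of a caller-side override mixing these forms (the class name is illustrative, and the accepted forms are inferred from the snippets rather than documented here):

    custom_timing = {
        'fixation': 400,                                  # fixed duration in ms
        'target': lambda: np.random.uniform(800, 1200),   # re-drawn each trial
        'interval1': (600, 800, 1000),                    # sampled from these values
    }
    env = ReadySetGoLikeTask(dt=80, timing=custom_timing)  # hypothetical class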
Example #11
    def __init__(self, dt=80, rewards=None, timing=None, prod_margin=0.2):
        super().__init__(dt=dt)
        self.prod_margin = prod_margin
        self.production_ind = [0, 1]
        self.intervals = [800, 1500]

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,  # XXX: not specified
            'cue': lambda: self.rng.uniform(1000, 3000),
            'set': 50
        }
        if timing:
            self.timing.update(timing)

        self.abort = False
        # set action and observation space
        self.action_space = spaces.Discrete(2)  # (fixate, go)
        # Fixation, Interval indicator x2, Set
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(4, ),
                                            dtype=np.float32)
Example #12
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0):
        super().__init__(dt=dt)

        # trial conditions
        self.contexts = [0, 1]  # index for context inputs
        self.choices = [1, 2]  # left, right choice
        self.cohs = [5, 15, 50]
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 300,
            # 'target': 350,
            'stimulus': 750,
            'delay': ngym.random.TruncExp(600, 300, 3000),
            'decision': 100}
        if timing:
            self.timing.update(timing)

        self.abort = False

        # set action and observation space
        names = ['fixation', 'stim1_mod1', 'stim2_mod1',
                 'stim1_mod2', 'stim2_mod2', 'context1', 'context2']
        name = {name: i for i, name in enumerate(names)}
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(7,),
                                            dtype=np.float32, name=name)

        name = {'fixation': 0, 'choice1': 1, 'choice2': 2}
        self.action_space = spaces.Discrete(3, name=name)
Example #13
    def __init__(self,
                 dt=10,
                 rewards=None,
                 timing=None,
                 p_pulse=(0.3, 0.7),
                 n_bin=6):
        super().__init__(dt=dt)
        self.p_pulse = p_pulse
        self.n_bin = n_bin

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {'fixation': 500, 'decision': 500}
        for i in range(n_bin):
            self.timing['cue' + str(i)] = 10
            self.timing['bin' + str(i)] = 240
        if timing:
            self.timing.update(timing)

        self.abort = False

        name = {'fixation': 0, 'stimulus': [1, 2]}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(3, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'choice': [1, 2]}
        self.action_space = spaces.Discrete(3, name=name)
Example #14
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 sigma=1.,
                 stim_scale=1.,
                 n_ch=3,
                 ob_nch=False,
                 zero_irrelevant_stim=False,
                 ob_histblock=False,
                 cohs=[0, 6.4, 12.8, 25.6, 51.2]):

        super().__init__(dt=dt)
        self.n = n_ch
        self.choices = np.arange(n_ch)
        self.ob_nch = ob_nch
        self.ob_histblock = ob_histblock
        self.zero_irrelevant_stim = zero_irrelevant_stim
        assert isinstance(n_ch, int), 'n_ch must be integer'
        assert n_ch > 1, 'n_ch must be at least 2'
        assert isinstance(ob_histblock, bool), 'ob_histblock must be True/False'
        assert isinstance(ob_nch, bool), 'ob_nch must be True/False'

        self.cohs = np.array(cohs) * stim_scale
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)
        self.timing = {
            'fixation': 500,
            'stimulus': ngym.random.TruncExp(330, 80, 1500, rng=self.rng),
            'decision': 500
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # Action and observation spaces
        name = {'fixation': 0, 'stimulus': range(1, n_ch + 1)}
        if ob_nch:
            name.update({'Active choices': n_ch + ob_nch})
        if ob_histblock:
            name.update({'Current block': n_ch + ob_nch + ob_histblock})
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(n_ch + ob_nch +
                                                   ob_histblock + 1, ),
                                            dtype=np.float32,
                                            name=name)

        name = {'fixation': 0, 'choice': range(1, n_ch + 1)}
        self.action_space = spaces.Discrete(n_ch + 1, name=name)
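Note how the two boolean flags feed into the observation layout above: Python treats True/False as 1/0, so they both extend the shape and act as index offsets. A worked example:

    n_ch, ob_nch, ob_histblock = 3, True, False
    obs_dim = n_ch + ob_nch + ob_histblock + 1   # 3 + 1 + 0 + 1 = 5
    # index layout: 0 fixation, 1..3 stimulus, 4 'Active choices' (= n_ch + ob_nch)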
Example #15
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0, cohs=None,
                 dim_ring=16, w_mod=(1, 1), stim_mod=(True, True),
                 delaycomparison=True):
        super().__init__(dt=dt)

        # trial conditions
        if cohs is None:
            self.cohs = np.array([0.08, 0.16, 0.32])
        else:
            self.cohs = cohs
        self.w_mod1, self.w_mod2 = w_mod
        self.stim_mod1, self.stim_mod2 = stim_mod
        self.delaycomparison = delaycomparison

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        if self.delaycomparison:
            self.timing = {
                'fixation': lambda: self.rng.uniform(200, 500),
                'stim1': 500,
                'delay': 1000,
                'stim2': 500,
                'decision': 200}
        else:
            self.timing = {
                'fixation': lambda: self.rng.uniform(200, 500),
                'stimulus': 500,
                'decision': 200}
        if timing:
            self.timing.update(timing)

        self.abort = False

        # action and observation space
        self.theta = np.linspace(0, 2*np.pi, dim_ring+1)[:-1]
        self.choices = np.arange(dim_ring)

        if dim_ring < 2:
            raise ValueError('dim_ring cannot be smaller than 2')

        name = {
            'fixation': 0,
            'stimulus_mod1': range(1, dim_ring + 1),
            'stimulus_mod2': range(dim_ring + 1, 2 * dim_ring + 1)}
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(1 + 2 * dim_ring,),
            dtype=np.float32, name=name)
        name = {'fixation': 0, 'choice': range(1, dim_ring + 1)}
        self.action_space = spaces.Discrete(1+dim_ring, name=name)
Example #16
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 lowbound=0.,
                 highbound=1.):
        super().__init__(dt=dt)
        self.lowbound = lowbound
        self.highbound = highbound

        # Rewards
        self.rewards = {
            'abort': -0.1,
            'correct': +1.,
            'fail': -0.,
            'miss': -0.5
        }
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'stimulus': 500,
            'delay': (0, 1000, 2000),
            'decision': 500
        }
        if timing:
            self.timing.update(timing)

        self.r_tmax = self.rewards['miss']
        self.abort = False

        name = {'go': 0, 'stimulus': 1}
        self.observation_space = spaces.Box(low=np.array([0., -2]),
                                            high=np.array([1, 2.]),
                                            dtype=np.float32,
                                            name=name)

        self.action_space = spaces.Box(low=np.array((-1.0, -1.0)),
                                       high=np.array((1.0, 2.0)),
                                       dtype=np.float32)
Example #17
    def __init__(self, env, modality=0, n_modality=1):
        super().__init__(env)
        self.modality = modality
        if 'stimulus' not in self.task.observation_space.name:
            raise KeyError('observation_space does not have name stimulus')
        ind_stimulus = np.array(self.task.observation_space.name['stimulus'])
        len_stimulus = len(ind_stimulus)
        ob_space = self.task.observation_space
        ob_shape = ob_space.shape[0] + (n_modality - 1) * len_stimulus
        # Shift stimulus
        name = {'stimulus': ind_stimulus + len_stimulus * modality}
        self.observation_space = self.task.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(ob_shape,), dtype=ob_space.dtype, name=name)
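This wrapper shifts the wrapped task's stimulus channels into one of n_modality slots by rewriting the annotated observation space. A hedged usage sketch, assuming the class above is called Modality and that the wrapped task is available through ngym.make (both names are assumptions, not taken from the snippet):

    env = ngym.make('PerceptualDecisionMaking-v0')   # 2 stimulus units at indices [1, 2]
    env = Modality(env, modality=1, n_modality=2)
    # modality=1 shifts the stimulus to indices [3, 4], and the declared
    # observation grows by (n_modality - 1) * len(stimulus) channels.
    print(env.observation_space.name['stimulus'])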
Example #18
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 shape_weight=None,
                 n_loc=4):
        super().__init__(dt=dt)
        # The evidence weight of each stimulus
        if shape_weight is not None:
            self.shape_weight = shape_weight
        else:
            self.shape_weight = [
                -10, -0.9, -0.7, -0.5, -0.3, 0.3, 0.5, 0.7, 0.9, 10
            ]

        self.n_shape = len(self.shape_weight)
        dim_shape = self.n_shape
        # Shape representation needs to be fixed cross-platform
        self.shapes = np.eye(self.n_shape, dim_shape)
        self.n_loc = n_loc

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'delay': lambda: self.rng.uniform(450, 550),
            'decision': 500
        }
        for i_loc in range(n_loc):
            self.timing['stimulus' + str(i_loc)] = 500
        if timing:
            self.timing.update(timing)

        self.abort = False

        name = {'fixation': 0}
        start = 1
        for i_loc in range(n_loc):
            name['loc' + str(i_loc)] = range(start, start + dim_shape)
            start += dim_shape
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(1 + dim_shape * n_loc, ),
                                            dtype=np.float32,
                                            name=name)

        name = {'fixation': 0, 'choice': [1, 2]}
        self.action_space = spaces.Discrete(3, name=name)
Example #19
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 sigma=0.1,
                 stim_scale=1.,
                 n_ch=10,
                 n_stims=2):

        super().__init__(dt=dt)
        self.n_ch = n_ch
        self.choices = np.arange(n_stims)
        # Draw n_stims distinct binary stimulus patterns using the env's rng
        self.stims = np.array(list(itertools.product([0, 1],
                                                      repeat=n_ch))).T == 1
        self.stims = self.stims[:, self.rng.choice(self.stims.shape[1],
                                                   size=n_stims,
                                                   replace=False)]
        assert isinstance(n_ch, int), 'n_ch must be integer'
        assert isinstance(n_stims, int), 'n_stims must be integer'
        assert n_stims > 1, 'n_stims must be at least 2'
        # The strength of evidence, modulated by stim_scale.
        self.cohs = np.array([100.]) * stim_scale
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)
        self.timing = {
            'fixation': 500,
            'stimulus': 500,
            'delay': 1000,
            'decision': 500
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # Action and observation spaces
        name = {'fixation': 0, 'stimulus': range(1, n_ch + 1)}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(n_ch + 1, ),
                                            dtype=np.float32,
                                            name=name)
        self.mapping = np.arange(n_stims)
        self.unwrapped.rng.shuffle(self.mapping)
        name = {'fixation': 0, 'choice': range(1, n_stims + 1)}
        self.action_space = spaces.Discrete(n_stims + 1, name=name)
Example #20
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 sigma=1.0,
                 delay=None,
                 stim_dur=100):
        super().__init__(dt=dt)
        # Possible decisions at the end of the trial
        self.choices = [0, 1]

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise
        self.delay = delay
        # Stimulus duration in steps; it should be greater than 1 step,
        # otherwise there is no time to respond within the response window
        self.stim_dur = int(stim_dur / self.dt)
        if self.stim_dur == 1:
            self.extra_step = 1
            if delay is None:
                warnings.warn('Added an extra step after the actual stimulus; '
                              'otherwise the model cannot respond within the '
                              'response window (stimulus epoch)')
        else:
            self.extra_step = 0

        if self.stim_dur < 1:
            warnings.warn('Stimulus duration shorter than dt')

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': -1., 'miss': -1}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'stimulus': ngym.random.TruncExp(1000, 500, 1500)
        }
        if timing:
            self.timing.update(timing)

        # whether to abort (T) or not (F) the trial when breaking fixation:
        self.abort = False

        name = {'fixation': 0, 'stimulus': 1}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(2, ),
                                            dtype=np.float32,
                                            name=name)

        self.action_space = spaces.Discrete(2, name={'fixation': 0, 'go': 1})
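A worked example of the millisecond-to-step conversion above (values chosen for illustration):

    dt = 100
    for stim_dur in (50, 100, 300):
        print(stim_dur, '->', int(stim_dur / dt), 'step(s)')
    # 50 ms  -> 0 steps: triggers the 'Stimulus duration shorter than dt' warning
    # 100 ms -> 1 step:  an extra step is added so a response is still possible
    # 300 ms -> 3 steps: no adjustment needed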
Example #21
    def __init__(self,
                 dt=100,
                 context=0,
                 rewards=None,
                 timing=None,
                 sigma=1.0,
                 dim_ring=2):
        super().__init__(dt=dt)

        # trial conditions
        self.choices = [1, 2]  # left, right choice
        self.cohs = [5, 15, 50]
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise
        self.context = context

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 300,
            # 'target': 350,
            'stimulus': 750,
            'delay': ngym.random.TruncExp(600, 300, 3000, rng=self.rng),
            'decision': 100
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # set action and observation space
        self.theta = np.linspace(0, 2 * np.pi, dim_ring + 1)[:-1]
        self.choices = np.arange(dim_ring)

        name = {
            'fixation': 0,
            'stimulus_mod1': range(1, dim_ring + 1),
            'stimulus_mod2': range(dim_ring + 1, 2 * dim_ring + 1)
        }
        shape = (1 + 2 * dim_ring, )
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=shape,
                                            dtype=np.float32,
                                            name=name)

        name = {'fixation': 0, 'choice': range(1, dim_ring + 1)}
        self.action_space = spaces.Discrete(1 + dim_ring, name=name)
Example #22
    def __init__(self,
                 dt=100,
                 vpairs=None,
                 rewards=None,
                 timing=None,
                 sigma=1.0):
        super().__init__(dt=dt)

        # Pair of stimulus strengthes
        if vpairs is None:
            self.vpairs = [(18, 10), (22, 14), (26, 18), (30, 22), (34, 26)]
        else:
            self.vpairs = vpairs

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 500,
            'stimulus1': 500,
            'delay': 1000,
            'stimulus2': 500,
            'decision': 100
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # Input scaling
        self.vall = np.ravel(self.vpairs)
        self.vmin = np.min(self.vall)
        self.vmax = np.max(self.vall)

        # action and observation space
        name = {'fixation': 0, 'stimulus': 1}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(2, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'choice': [1, 2]}
        self.action_space = spaces.Discrete(3, name=name)

        self.choices = [1, 2]
Example #23
    def __init__(self, dt=100, rewards=None, timing=None):
        super().__init__(dt=dt)

        # trial conditions
        self.B_to_A = 1 / 2.2
        self.juices = [('a', 'b'), ('b', 'a')]
        self.offers = [(0, 1), (1, 3), (1, 2), (1, 1), (2, 1), (3, 1), (4, 1),
                       (6, 1), (2, 0)]

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +0.22}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 1500,
            'offer_on': lambda: self.rng.uniform(1000, 2000),
            'decision': 750
        }
        if timing:
            self.timing.update(timing)

        self.R_B = self.B_to_A * self.rewards['correct']
        self.R_A = self.rewards['correct']
        self.abort = False
        # Increase initial policy -> baseline weights
        self.baseline_Win = 10

        name = {
            'fixation': 0,
            'a1': 1,
            'b1': 2,  # a or b for choice 1
            'a2': 3,
            'b2': 4,  # a or b for choice 2
            'n1': 5,
            'n2': 6  # amount for choice 1 or 2
        }
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(7, ),
                                            dtype=np.float32,
                                            name=name)

        self.act_dict = {'fixation': 0, 'choice1': 1, 'choice2': 2}
        self.action_space = spaces.Discrete(3, name=self.act_dict)
Example #24
    def __init__(self, dt=100, n=2, p=(.5, .5), rewards=None, timing=None):
        super().__init__(dt=dt)
        if timing is not None:
            print('Warning: Bandit task does not require timing variable.')

        if rewards:
            self.rewards = rewards
        else:
            self.rewards = np.ones(n)  # 1 for every arm

        self.n = n
        self.p = np.array(p)  # Reward probabilities

        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(1, ),
                                            dtype=np.float32)
        self.action_space = spaces.Discrete(n)
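Unlike the trial-period tasks above, the bandit has no timing structure, so a plain interaction loop is enough to exercise it. A minimal rollout sketch using the classic gym step API (older 4-tuple return; recent gym/gymnasium versions return five values), with Bandit standing in for whatever name the class above carries:

    env = Bandit(dt=100, n=2, p=(0.2, 0.8))
    ob = env.reset()
    total = 0.0
    for _ in range(100):
        action = env.action_space.sample()
        ob, reward, done, info = env.step(action)
        total += reward
    print('average reward per pull:', total / 100)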
Example #25
    def __init__(self, dt=100, rewards=None, timing=None):
        super().__init__(dt=dt)
        # Rewards
        self.rewards = {'correct': +1., 'fail': -0.1}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {'fixation': 500, 'reach': 500}
        if timing:
            self.timing.update(timing)

        # action and observation spaces
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(32, ),
                                            dtype=np.float32)
        self.theta = np.arange(0, 2 * np.pi, 2 * np.pi / 32)
        self.state = np.pi
Example #26
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 dim_ring=2,
                 sigma=1.0):
        super().__init__(dt=dt)

        self.wagers = [True, False]
        self.theta = np.linspace(0, 2 * np.pi, dim_ring + 1)[:-1]
        self.choices = np.arange(dim_ring)
        self.cohs = [0, 3.2, 6.4, 12.8, 25.6, 51.2]
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)
        self.rewards['sure'] = 0.7 * self.rewards['correct']

        self.timing = {
            'fixation': 100,
            # 'target':  0,
            'stimulus': ngym.random.TruncExp(180, 100, 900),
            'delay': ngym.random.TruncExp(1350, 1200, 1800),
            'pre_sure': lambda: self.rng.uniform(500, 750),
            'decision': 100
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # set action and observation space
        name = {'fixation': 0, 'stimulus': [1, 2], 'sure': 3}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(4, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'choice': [1, 2], 'sure': 3}
        self.action_space = spaces.Discrete(4, name=name)
Example #27
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 cohs=None,
                 sigma=1.0,
                 dim_ring=2):
        super().__init__(dt=dt)
        if cohs is None:
            self.cohs = np.array([0, 6.4, 12.8, 25.6, 51.2])
        else:
            self.cohs = cohs
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 100,
            'stimulus': 2000,
            'delay': 0,
            'decision': 100
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        self.theta = np.linspace(0, 2 * np.pi, dim_ring + 1)[:-1]
        self.choices = np.arange(dim_ring)

        name = {'fixation': 0, 'stimulus': range(1, dim_ring + 1)}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(1 + dim_ring, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'choice': range(1, dim_ring + 1)}
        self.action_space = spaces.Discrete(1 + dim_ring, name=name)
Example #28
    def __init__(self, dt=100, rewards=None, timing=None, sigma=1.0,
                 dim_ring=16, matchto='sample', matchgo=True):
        super().__init__(dt=dt)
        self.matchto = matchto
        if self.matchto not in ['sample', 'category']:
            raise ValueError('Match has to be either sample or category')
        self.matchgo = matchgo
        self.choices = ['match', 'non-match']  # match, non-match

        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 300,
            'sample': 500,
            'delay': 1000,
            'test': 500,
            'decision': 900}
        if timing:
            self.timing.update(timing)

        self.abort = False

        if np.mod(dim_ring, 2) != 0:
            raise ValueError('dim_ring should be an even number')
        self.dim_ring = dim_ring
        self.half_ring = int(self.dim_ring/2)
        self.theta = np.linspace(0, 2 * np.pi, dim_ring + 1)[:-1]

        name = {'fixation': 0, 'stimulus': range(1, dim_ring + 1)}
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(1 + dim_ring,), dtype=np.float32, name=name)
        name = {'fixation': 0, 'choice': range(1, dim_ring + 1)}
        self.action_space = spaces.Discrete(1+dim_ring, name=name)
Example #29
    def __init__(self, dt=100, rewards=None, timing=None):
        super().__init__(dt=dt)
        self.choices = [0, 1]

        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': ngym.random.TruncExp(600, 400, 800, rng=self.rng),
            'rule_target': 1000,
            'fixation2': ngym.random.TruncExp(600, 400, 900, rng=self.rng),
            'flash1': 100,
            'delay': (530, 610, 690, 770, 850, 930, 1010, 1090, 1170),
            'flash2': 100,
            'decision': 700,
        }
        if timing:
            self.timing.update(timing)
        self.mid_delay = np.median(self.timing['delay'])  # median of the possible delays

        self.abort = False

        name = {'fixation': 0, 'rule': [1, 2], 'stimulus': [3, 4]}
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(5, ),
                                            dtype=np.float32,
                                            name=name)
        name = {'fixation': 0, 'rule': [1, 2], 'choice': [3, 4]}
        self.action_space = spaces.Discrete(5, name=name)

        self.chose_correct_rule = False
        self.rule = 0
        self.trial_in_block = 0
        self.block_size = 10
        self.new_block()
Example #30
    def __init__(self,
                 dt=100,
                 rewards=None,
                 timing=None,
                 stim_scale=1.,
                 sigma=1.0):
        super().__init__(dt=dt)
        self.choices = [1, 2]
        # cohs specifies the amount of evidence (modulated by stim_scale)
        self.cohs = np.array([0, 6.4, 12.8, 25.6, 51.2]) * stim_scale
        self.sigma = sigma / np.sqrt(self.dt)  # Input noise

        # Rewards
        self.rewards = {'abort': -0.1, 'correct': +1., 'fail': 0.}
        if rewards:
            self.rewards.update(rewards)

        self.timing = {
            'fixation': 0,
            'stimulus': 1150,
            #  TODO: sampling of delays follows exponential
            'delay': (300, 500, 700, 900, 1200, 2000, 3200, 4000),
            # 'go_cue': 100, # TODO: Not implemented
            'decision': 1500
        }
        if timing:
            self.timing.update(timing)

        self.abort = False

        # action and observation spaces
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=(3, ),
                                            dtype=np.float32)
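Beyond step-by-step interaction, environments like these are often consumed through neurogym's Dataset helper, which rolls out trials and returns batched observation/target arrays for supervised training. A hedged sketch (the task id is illustrative, and the Dataset call signature is my understanding of the library rather than something shown above):

    import neurogym as ngym

    dataset = ngym.Dataset('PerceptualDecisionMaking-v0', batch_size=16, seq_len=100)
    inputs, target = dataset()
    print(inputs.shape, target.shape)  # roughly (seq_len, batch, ob_dim) and (seq_len, batch)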