Example #1
0
def event_random(N, k, mult=1, prng=None):
    """
    Simulates (by Monte Carlo methods) an event-related trialset of
    N + 1 randomized conditions (adding one to N for the jittered
    baseline, which is always 0) of total length (N + 1) * k, where
    k is the number of trials/condition.

    It is assumed that trial and baseline events are of the same length
    and that both are integer multiples of the TR.

    For example, if mult is 1 (default) then each condition event takes
    exactly one TR.  If it is 2 then there are 2 TRs per condition
    event, and so on.

    Note: 0 (or null) conditions have a special meaning in the fitRL
    functions.

    Returns a list of the conditions; '0' always indicates baseline,
    the terminal state.
    """

    prng = process_prng(prng)

    ## Create N + 1 conditions (0 is the baseline),
    ## k trials each, then randomize their order.
    conditions = []
    for cond in range(N + 1):
        conditions.extend([int(cond)] * k)
    prng.shuffle(conditions)  ## In place

    if mult == 1:
        return conditions, prng
    else:
        ## Repeat each event mult times so each
        ## condition event spans mult TRs.
        conditions_mult = []
        for c in conditions:
            conditions_mult.extend([c] * int(mult))
        return conditions_mult, prng
Example #2
0
def some_learn(N, k, N_learn, loc, event=True, rand_learn=True, prng=None):
    """
    Creates 'uneven' acc and p value distributions for k trials in
    N conditions in the returned trialset, where N_learn is the number
    of conditions that show learning (via sim_acc_learn()).

    N minus N_learn conditions' simulated data will be governed instead
    by sim_acc_rand.  If event is True an event-related trialset is
    created.
    """
    from copy import deepcopy

    prng = process_prng(prng)
    if N == N_learn:
        raise ValueError('N_learn must be less than N.')
    if N_learn <= 0:
        raise ValueError('N_learn must be 1 or greater.')

    if event:
        trialset, prng = event_randomtrial(N, k, mult=1, prng=prng)
    else:
        trialset, prng = randomtrial(N, k, prng)

    ## Event trialsets carry an extra (baseline) condition.
    N_c = deepcopy(N)
    if event:
        N_c += 1

    acc = [0, ] * (N_c * k)
    p = [0, ] * (N_c * k)

    names = list(set(trialset))
    for ii, n in enumerate(names):
        ## Skip null (baseline) trials
        if (n == 0) or (n == '0'):
            continue

        acc_n = []
        p_n = []

        ## Does this condition learn, or respond randomly?
        if ii <= N_learn:
            print('Learning in iteration {0}.'.format(ii))
            if rand_learn:
                acc_n, p_n, prng = random_learn(k, 1. / N, loc, prng)
            else:
                acc_n, p_n, prng = learn(k, loc, prng)
        else:
            ## BUG FIX: pass the shared prng through; the original
            ## called randomacc(k, 1./N) with no prng, silently
            ## discarding the shared random state.
            acc_n, p_n, prng = randomacc(k, 1. / N, prng)

        ## Map this condition's acc/p values onto its trials.
        for jj, t in enumerate(trialset):
            if t == n:
                acc[jj] = acc_n.pop(0)
                p[jj] = p_n.pop(0)

    return trialset, acc, p, prng
Example #3
0
def random(N, p, prng=None):
    """
    Generates and returns a random sequence of {1, 0} impulses of
    length <N>, each impulse being 1 with probability p.

    Returns the impulse list, a matching list of p values, and the prng.
    """

    prng = process_prng(prng)

    impulses = prng.binomial(1, p, N).tolist()
    p_values = [p] * N

    return impulses, p_values, prng
Example #4
0
def random(N, k, prng=None):
    """
    Creates a trialset of N randomized conditions of total length N * k,
    where k is the number of trials/condition.  Returns a list of
    conditions.  Conditions are indexed from 1; 0 implies terminal
    conditions, something this function can't handle.
    """

    prng = process_prng(prng)

    ## k trials for each of conditions 1..N, shuffled.
    conditions = []
    for n in range(1, N + 1):
        conditions.extend([n] * k)
    prng.shuffle(conditions)  ## in place

    return conditions, prng
Example #5
0
def random(N, k, prng=None):
    """
    Creates a trialset of N randomized conditions of total length N * k,
    where k is the number of trials/condition.  Returns a list of
    conditions.  Conditions are indexed from 1; 0 implies terminal
    conditions, something this function can't handle.
    """

    prng = process_prng(prng)

    ## k trials for each of conditions 1..N, shuffled.
    conditions = []
    for n in range(1, N + 1):
        conditions.extend([n] * k)
    prng.shuffle(conditions)  ## in place

    return conditions, prng
Example #6
0
def learn(N, k, loc, event=True, prng=None):
    """
    Returns a trialset matching N, k along with matching accuracy and
    probability lists.  If event is True event_random is used in place
    of random to create the trialset.
    """
    from copy import deepcopy

    prng = process_prng(prng)

    ## For each condition name (n), create acc_n, p_n then map those
    ## into acc, p for each matching trial (t) in the trialset.
    if event:
        trialset, prng = event_randomtrial(N, k, 1, prng)
    else:
        trialset, prng = randomtrial(N, k, prng)

    ## Event trialsets carry an extra (baseline) condition.
    N_c = deepcopy(N)
    if event:
        N_c += 1

    acc = [0, ] * (N_c * k)
    p = [0, ] * (N_c * k)

    names = list(set(trialset))
    for n in names:
        ## Skip null (baseline) trials.
        ## BUG FIX: compare by value (==) rather than identity (is);
        ## `n is 0` / `n is '0'` depends on interpreter interning and
        ## is not a reliable equality test.
        if (n == 0) or (n == '0'):
            continue

        acc_n = []
        p_n = []
        ## Use a float chance level (1./N) -- consistent with the
        ## sibling functions and safe under integer division.
        acc_n, p_n, prng = random_learn(k, 1. / N, loc, prng)
        for ii, t in enumerate(trialset):
            if t == n:
                acc[ii] = acc_n.pop(0)
                p[ii] = p_n.pop(0)

    return trialset, acc, p, prng
Example #7
0
def random_learn(N, p_rand, loc, prng=None):
    """
    Generates and returns a binomial array of roughly length N where
    p(1) (also returned) increases with the CDF of the normal
    distribution after a random trial T (T sampled uniformly from
    [0, N)).  Before T, accuracy is random, governed by p_rand.

    NOTE(review): the learning segment is built with np.arange over a
    float step, so the combined length can differ from N by one for
    some N, T combinations -- TODO confirm callers tolerate this.
    """
    from simBehave.acc import random

    prng = process_prng(prng)
    
    ## Onset of learning: a uniform draw from [0, N).
    T = int(prng.randint(0,N,1))

    ## Pre-learning segment: chance-level (p_rand) responding.
    acc_1, p_1, prng = random(T, p_rand, prng)
    
    # Learn: a noisy range feeding the normal CDF below.
    trials = np.arange(.01, 10, (10/float(N - T)))
    
    # Pass random state from prng to np so that
    # stats.<> will inherit the right state.
    # There does not seem to be a way to set the random
    # state of stats.* functions directly.
    np.random.set_state(prng.get_state())
    
    trials = trials + stats.norm.rvs(size=trials.shape[0]) 
    p_2 = stats.norm.cdf(trials,loc)
    p_2[p_2 < 0.5] = 0.5
        ## Remove p vals less than 0.5 --
        ## we don't want below chance sampling
        
    prng.set_state(np.random.get_state())
        ## Pass the random state from np back to
        ## prng, then we can use prng again...
    
    ## Learning segment: one binomial draw per p value.
    acc_2 = [int(prng.binomial(1, p, 1)) for p in p_2]
    
    acc = acc_1 + acc_2
    p = list(p_1) + list(p_2)

    return acc, p, prng
Example #8
0
def event_random(N, k, mult=1, prng=None):
    """
    Simulates (by Monte Carlo methods) an event-related trialset of
    N + 1 randomized conditions (adding one to N for the jittered
    baseline, which is always 0) of total length (N + 1) * k, where
    k is the number of trials/condition.

    It is assumed that trial and baseline events are of the same length
    and that both are integer multiples of the TR.

    For example, if mult is 1 (default) then each condition event takes
    exactly one TR.  If it is 2 then there are 2 TRs per condition
    event, and so on.

    Note: 0 (or null) conditions have a special meaning in the fitRL
    functions.

    Returns a list of the conditions; '0' always indicates baseline,
    the terminal state.
    """

    prng = process_prng(prng)

    ## Create N + 1 conditions (0 is the baseline),
    ## k trials each, then randomize their order.
    conditions = []
    for cond in range(N + 1):
        conditions.extend([int(cond)] * k)
    prng.shuffle(conditions)  ## In place

    if mult == 1:
        return conditions, prng
    else:
        ## Repeat each event mult times so each
        ## condition event spans mult TRs.
        conditions_mult = []
        for c in conditions:
            conditions_mult.extend([c] * int(mult))
        return conditions_mult, prng
Example #9
0
def learn(N, loc, prng=None):
    """
    Generates and returns a binomial array of length N where p(1)
    (also returned) increases with the CDF of the normal distribution,
    integrated from 0.01 to 10, plus white noise.

    The learning rate is determined by sampling of a normal distribution
    centered on loc.

    Note: A loc of 3 gives curves qualitatively similar to learning
    patterns often observed in the abstract categorization tasks
    common to the Seger Lab.
    """

    prng = process_prng(prng)
    
    # Pass random state from prng to np so that
    # stats.<> will inherit the right state.
    # There does not seem to be a way to set the random
    # state of stats.* functions directly.
    np.random.set_state(prng.get_state())
    
    ## Learn:
    ## Create a noisy range feeding the CDF.
    trials = np.arange(.01,10,10/float(N))
    trials = trials + stats.norm.rvs(size=trials.shape[0]) 
    p_values = stats.norm.cdf(trials,loc)            

    # And p_values becomes acc (one binomial draw per p).
    prng.set_state(np.random.get_state())
        ## Pass the random state from np back to
        ## prng, then we can use prng again...
    
    acc = [int(prng.binomial(1, p, 1)) for p in p_values] 

    return acc, list(p_values), prng