Example #1
0
def types(l):
    """outputs list of types from list of lexica input."""
    typeList = []
    for i in l:
        typeList.append(LiteralPlayer(alpha, i))
    for i in l:
        typeList.append(GriceanPlayer(alpha, lam, i))
    return typeList
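
A minimal usage sketch for types(), assuming it is called from a module where numpy, LiteralPlayer, GriceanPlayer, alpha and lam are all in scope; the two lexica below are illustrative values only, not taken from the source.

# Illustrative only: these lexica and the surrounding globals (alpha, lam) are assumed.
import numpy as np

lexica = [np.array([[0., 0.], [1., 1.]]), np.array([[1., 1.], [1., 1.]])]
all_types = types(lexica)  # two LiteralPlayer instances followed by two GriceanPlayer instances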
Example #3
0
def run_dynamics(alpha, lam, k, sample_amount, gens, runs, states, messages,
                 learning_parameter, kind, mutual_exclusivity):
    """Main function to run the dynamics."""

    state_freq = np.ones(states) / float(states)  # frequency of states s_1,...,s_n

    print '#Starting, ', datetime.datetime.now()

    lexica = get_lexica(states, messages, mutual_exclusivity)
    bins = get_lexica_bins(lexica)  #To bin types with identical lexica
    l_prior = get_prior(lexica)
    typeList = [LiteralPlayer(lam, lex) for lex in lexica] + \
               [GriceanPlayer(alpha, lam, lex) for lex in lexica]

    likelihoods = np.array([t.sender_matrix for t in typeList])

    u = get_utils(typeList, states, messages, lam, alpha, mutual_exclusivity)
    q = get_mutation_matrix(states, messages, state_freq, likelihoods, l_prior,
                            learning_parameter, sample_amount, k, lam, alpha,
                            mutual_exclusivity)

    print '#Beginning multiple runs, ', datetime.datetime.now()
    results_path = './results/%s-s%d-m%d-lam%d-a%d-k%d-samples%d-l%d-g%d-me%s.csv' % \
        (kind, states, messages, lam, alpha, k, sample_amount, learning_parameter,
         gens, str(mutual_exclusivity))
    f = csv.writer(open(results_path, 'wb'))
    f.writerow(['runID', 'kind'] + ['t_ini' + str(x) for x in xrange(len(typeList))] +
               ['lam', 'alpha', 'k', 'samples', 'l', 'gens', 'm_excl'] +
               ['t_final' + str(x) for x in xrange(len(typeList))])

    mean_path = './results/00mean-%s-s%d-m%d-g%d-r%d-me%s.csv' % \
        (kind, states, messages, gens, runs, str(mutual_exclusivity))
    if os.path.isfile(mean_path):
        f_mean = csv.writer(open(mean_path, 'a'))
    else:
        f_mean = csv.writer(open(mean_path, 'wb'))
        f_mean.writerow(['kind', 'lam', 'alpha', 'k', 'samples', 'l', 'gens', 'runs', 'm_excl'] +
                        ['t_mean' + str(x) for x in xrange(len(typeList))])

    p_sum = np.zeros(len(typeList))  # vector to accumulate type frequencies across runs (mean taken later)

    for i in xrange(runs):
        p = np.random.dirichlet(np.ones(len(typeList)))  # unbiased random starting state
        p_initial = p
        for r in range(gens):
            if kind == 'rmd':  # replicator-mutator dynamics: selection step, then mutation via Q
                pPrime = p * [np.sum(u[t, ] * p) for t in range(len(typeList))]
                pPrime = pPrime / np.sum(pPrime)
                p = np.dot(pPrime, q)
            elif kind == 'm':  # mutation dynamics only
                p = np.dot(p, q)
            elif kind == 'r':  # replicator dynamics only
                pPrime = p * [np.sum(u[t, ] * p) for t in range(len(typeList))]
                p = pPrime / np.sum(pPrime)

        f.writerow([str(i),kind] + [str(p_initial[x]) for x in xrange(len(typeList))]+\
                   [str(lam),str(alpha),str(k),str(sample_amount),str(learning_parameter),str(gens),str(mutual_exclusivity)] +\
                   [str(p[x]) for x in xrange(len(typeList))])
        p_sum += p
    p_mean = p_sum / runs
    f_mean.writerow([kind,str(lam),str(alpha),str(k),str(sample_amount),str(learning_parameter),str(gens),str(runs),str(mutual_exclusivity)] +\
                        [str(p_mean[x]) for x in xrange(len(typeList))])

    inc = np.argmax(p_mean)
    inc_bin = get_type_bin(inc, bins)

    print
    print '##### Mean results #####'
    print '### Parameters: ###'
    print 'dynamics= %s, alpha = %d, lambda = %d, k = %d, samples per type = %d, learning parameter = %.2f, generations = %d, runs = %d' % (
        kind, alpha, lam, k, sample_amount, learning_parameter, gens, runs)
    print '#######################'
    print
    print 'Incumbent type:', inc, ' with proportion ', p_mean[inc]
    if mutual_exclusivity:
        print 'Target type (t24) proportion: ', p_mean[24]
    print '#######################'
    print
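
A hypothetical invocation of run_dynamics, as a sketch only: every parameter value below is an illustrative assumption, and it presumes the helper functions (get_lexica, get_lexica_bins, get_prior, get_utils, get_mutation_matrix), the player classes, and a ./results/ directory are available.

# Illustrative call; all values are assumptions, not taken from the source.
run_dynamics(alpha=1, lam=30, k=5, sample_amount=10, gens=50, runs=20,
             states=3, messages=3, learning_parameter=1, kind='rmd',
             mutual_exclusivity=False)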
states = 2 #number of states
messages = 2 #number of messages


# file to store each unweighted simulation run after n generations
f_unwgh_mean = csv.writer(open('./results/singlescalar-unwgh-mean-a%d-c%f-l%d-k%d-g%d-r%d.csv'
                               % (alpha, cost, lam, k, gens, runs), 'wb'))
# file to store each weighted simulation run after n generations
f_wgh_mean = csv.writer(open('./results/singlescalar-wgh-mean-a%d-c%f-l%d-k%d-g%d-r%d.csv'
                             % (alpha, cost, lam, k, gens, runs), 'wb'))

# file to store Q-matrix
f_q = csv.writer(open('./results/singlescalar-q-matrix-a%d-c%f-l%d-k%d-g%d-r%d.csv'
                      % (alpha, cost, lam, k, gens, runs), 'wb'))



print '#Starting, ', datetime.datetime.now()

t1, t2, t3, t4, t5, t6 = (LiteralPlayer(alpha, l1), LiteralPlayer(alpha, l2),
                          LiteralPlayer(alpha, l3), LiteralPlayer(alpha, l4),
                          LiteralPlayer(alpha, l5), LiteralPlayer(alpha, l6))
t7, t8, t9, t10, t11, t12 = (GriceanPlayer(alpha, lam, l1), GriceanPlayer(alpha, lam, l2),
                             GriceanPlayer(alpha, lam, l3), GriceanPlayer(alpha, lam, l4),
                             GriceanPlayer(alpha, lam, l5), GriceanPlayer(alpha, lam, l6))

typeList = [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12]

print '#Computing likelihood, ', datetime.datetime.now()
likelihoods = np.array([t.sender_matrix for t in typeList])

# unnormalized prior over lexica, one entry per type (six lexica, literal then Gricean);
# entries with a cost term are penalized by the prior cost c
lexica_prior = np.array([2, 2 - 2 * cost, 2, 2 - cost, 2, 2 - cost,
                         2, 2 - 2 * cost, 2, 2 - cost, 2, 2 - cost])
lexica_prior = lexica_prior / sum(lexica_prior)  # normalize to a probability distribution

print lexica_prior

def normalize(m):
    """Row-normalize matrix m so that each row sums to 1."""
    return m / m.sum(axis=1)[:, np.newaxis]
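
For illustration, a quick check of normalize on a small made-up matrix; each row of the result sums to 1.

m = np.array([[1., 3.], [2., 2.]])
print normalize(m)  # -> [[0.25, 0.75], [0.5, 0.5]]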
Example #5
0
def run(alpha, cost, lam, k, learning_parameter, gens, runs):
    """Run the dynamics for the given parameters and write results to ./results/."""
    #####
    # the six lexica as 2x2 numpy arrays
    l1, l2, l3, l4, l5, l6 = (np.array([[0., 0.], [1., 1.]]),
                              np.array([[1., 1.], [0., 0.]]),
                              np.array([[1., 1.], [1., 1.]]),
                              np.array([[0., 1.], [1., 0.]]),
                              np.array([[0., 1.], [1., 1.]]),
                              np.array([[1., 1.], [1., 0.]]))

    sample_amount = 15  #fixed value for fixed Q

    states = 2  #number of states
    messages = 2  #number of messages

    state_freq = np.ones(states) / float(states)  # frequency of states s_1,...,s_n

    results_path = './results/singlescalar-a%.2f-c%.2f-l%d-k%d-samples%d-learn%.2f-g%d-r%d.csv' % \
        (alpha, cost, lam, k, sample_amount, learning_parameter, gens, runs)
    f = csv.writer(open(results_path, 'wb'))  # file to store mean results

    f.writerow([
        "run_ID", "t1_initial", "t2_initial", "t3_initial", "t4_initial",
        "t5_initial", "t6_initial", "t7_initial", "t8_initial", "t9_initial",
        "t10_initial", "t11_initial", "t12_initial", "alpha", "prior_cost_c",
        "lambda", "k", "sample_amount", "learning_parameter", "generations",
        "t1_final", "t2_final", "t3_final", "t4_final", "t5_final", "t6_final",
        "t7_final", "t8_final", "t9_final", "t10_final", "t11_final",
        "t12_final"
    ])

    q_path = './results/singlescalar-q-matrix-a%d-c%f-l%d-k%d-samples%d-learn%.2f.csv' % \
        (alpha, cost, lam, k, sample_amount, learning_parameter)
    f_q = csv.writer(open(q_path, 'wb'))  # file to store Q-matrix

    f_q.writerow([
        "alpha", "prior_cost_c", "lambda", "k", "sample_amount",
        "learning_parameter", "parent", "t1_mut", "t2_mut", "t3_mut", "t4_mut",
        "t5_mut", "t6_mut", "t7_mut", "t8_mut", "t9_mut", "t10_mut", "t11_mut",
        "t12_mut"
    ])
    ######

    print '#Starting, ', datetime.datetime.now()

    t1, t2, t3, t4, t5, t6 = (LiteralPlayer(alpha, l1), LiteralPlayer(alpha, l2),
                              LiteralPlayer(alpha, l3), LiteralPlayer(alpha, l4),
                              LiteralPlayer(alpha, l5), LiteralPlayer(alpha, l6))
    t7, t8, t9, t10, t11, t12 = (GriceanPlayer(alpha, lam, l1), GriceanPlayer(alpha, lam, l2),
                                 GriceanPlayer(alpha, lam, l3), GriceanPlayer(alpha, lam, l4),
                                 GriceanPlayer(alpha, lam, l5), GriceanPlayer(alpha, lam, l6))

    typeList = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12]

    print '#Computing likelihood, ', datetime.datetime.now()
    likelihoods = np.array([t.sender_matrix for t in typeList])

    # unnormalized prior over lexica, one entry per type (six lexica, literal then Gricean);
    # entries with a cost term are penalized by the prior cost c
    lexica_prior = np.array([2, 2 - 2 * cost, 2, 2 - cost, 2, 2 - cost,
                             2, 2 - 2 * cost, 2, 2 - cost, 2, 2 - cost])
    lexica_prior = lexica_prior / sum(lexica_prior)  # normalize to a probability distribution

    print '#Computing utilities, ', datetime.datetime.now()
    u = get_utils(typeList)

    print '#Computing Q, ', datetime.datetime.now()

    q = get_mutation_matrix(k, states, messages, likelihoods, state_freq,
                            sample_amount, lexica_prior, learning_parameter)

    # write each row of the mutation matrix Q, prefixed by the parameter settings
    for i in q:
        para = np.array([
            str(alpha),
            str(cost),
            str(lam),
            str(k),
            str(sample_amount),
            str(learning_parameter)
        ])
        j = np.append(para, i)
        f_q.writerow(j)

    ### Multiple runs
    print '#Beginning multiple runs, ', datetime.datetime.now()

    p_sum = np.zeros(len(typeList))  # vector to accumulate type frequencies across runs

    for i in xrange(runs):
        p = np.random.dirichlet(np.ones(len(typeList)))  # unbiased random starting state
        p_initial = p

        for r in range(gens):
            # replicator step (utility-weighted selection), then mutation step via Q
            pPrime = p * [np.sum(u[t, ] * p) for t in range(len(typeList))]
            pPrime = pPrime / np.sum(pPrime)
            p = np.dot(pPrime, q)
        f.writerow([str(i)] + [str(p_initial[x]) for x in xrange(len(typeList))] +
                   [str(alpha), str(cost), str(lam), str(k), str(sample_amount),
                    str(learning_parameter), str(gens)] +
                   [str(p[x]) for x in xrange(len(typeList))])

        p_sum += p

    p_mean = p_sum / runs

    print '### Overview of results ###', datetime.datetime.now()
    print 'Parameters: alpha = %.2f, c = %.2f, lambda = %d, k = %d, samples per type = %d, learning parameter = %.2f, generations = %d, runs = %d' % (
        alpha, cost, lam, k, sample_amount, learning_parameter, gens, runs)
    print 'Mean by type:'
    print p_mean
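
A hypothetical invocation of run(), as a sketch only: all parameter values are illustrative assumptions, and it presumes the player classes, get_utils, get_mutation_matrix, and a ./results/ directory are available.

# Illustrative call; every value is an assumption, not taken from the source.
run(alpha=1., cost=0.1, lam=30, k=5, learning_parameter=1., gens=50, runs=20)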