Example #1
def getRandomMixture(G, p, KL_lower, KL_upper, dtypes='discgauss', M=4, seed=None):
    """
    Generate a random MixtureModel with G components over p features.

    For each feature, component distributions are drawn at random and a candidate is
    accepted only if its symmetric KL divergence to every previously accepted component
    of that feature lies within [KL_lower, KL_upper].
    dtypes selects the feature types: 'disc', 'gauss' or 'discgauss' (mixed, chosen uniformly).
    M is the alphabet size for discrete features; seed is currently unused (see the
    commented-out block below).
    """

#    if seed:
#        random.seed(seed)
#        mixextend.set_gsl_rng_seed(seed)
#        #print '*** seed=',seed
#
#    else: # XXX debug
#        seed = random.randint(1,9000000)
#        mixextend.set_gsl_rng_seed(seed)
#        random.seed(seed)
#        #print '*** seed=',seed


    #M = 4  # Alphabet size for discrete distributions

    min_sigma = 0.1    # minimal std for Normal
    max_sigma = 1.0   # maximal std for Normal
    min_mu = -5.0      # minimal mean
    max_mu = 8.0       # maximal mean

    if dtypes == 'disc':
        featureTypes = [0] * p
    elif dtypes == 'gauss':
        featureTypes = [1] * p
    elif dtypes == 'discgauss':
        # discrete or Normal features for now, chosen uniformly
        # 0 discrete, 1 Normal
        featureTypes = [ random.choice( (0, 1) )  for i in range(p) ]
    else:
        raise TypeError("unknown dtypes value: %s" % str(dtypes))


    #print featureTypes

    C = []
    for j in range(p):
        c_j = []
        for i in range(G):
            #print i,j
            if featureTypes[j] == 0:
                acc = 0
                while acc == 0:
                    cand = DiscreteDistribution(M, random_vector(M) )

                    #print 'cand:',cand

                    acc = 1

                    for d in c_j:
                        KL_dist = sym_kl_dist(d,cand)
                        if KL_dist > KL_upper or KL_dist < KL_lower:
                            #print '  *', cand, 'rejected:', d , KL_dist
                            acc = 0
                            break

                c_j.append(cand)
            elif featureTypes[j] == 1:
                acc = 0
                while acc == 0:
                    mu = random.uniform(min_mu, max_mu)
                    sigma = random.uniform(min_sigma, max_sigma)

                    cand = NormalDistribution(mu, sigma )

                    #print 'cand:',cand

                    acc = 1

                    for d in c_j:
                        KL_dist = sym_kl_dist(d,cand)
                        if KL_dist > KL_upper or KL_dist < KL_lower:
                            #print '  *', cand, 'rejected:', d , KL_dist
                            acc = 0
                            break

                c_j.append(cand)

            else:
                raise RuntimeError("invalid feature type: %s" % str(featureTypes[j]))

        C.append(c_j)

#    print '\n'
#    for cc in C:
#        print cc


    comps = []
    for i in range(G):
        comps.append( ProductDistribution( [ C[j][i] for j in range(p) ] ) )

    pi = get_random_pi(G,0.1)

    m = MixtureModel(G,pi, comps,struct=1)
    m.updateFreeParams()

    return m
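
A minimal usage sketch for getRandomMixture above. The KL bounds control component separation per feature: a candidate distribution is kept only if its symmetric KL divergence to every previously accepted component of that feature lies between KL_lower and KL_upper. The import path, the argument values, and the sampleSet call are assumptions for illustration, taken from the PyMix-style mixture module referenced in Example #4 and the sampling call used in Example #5.

import random

# Assumed import location: the helpers used by getRandomMixture are taken to live in
# the PyMix 'mixture' module referenced in Example #4.
from mixture import (MixtureModel, ProductDistribution, DiscreteDistribution,
                     NormalDistribution, random_vector, get_random_pi, sym_kl_dist)

random.seed(17)  # seed the Python RNG used for the feature types and Normal parameters
m = getRandomMixture(G=3, p=5, KL_lower=0.5, KL_upper=10.0, dtypes='discgauss')
data = m.sampleSet(200)  # draw 200 samples from the generated model, as in Example #5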
Example #2
def getRandomMixture(G,
                     p,
                     KL_lower,
                     KL_upper,
                     dtypes='discgauss',
                     M=4,
                     seed=None):

    #    if seed:
    #        random.seed(seed)
    #        mixextend.set_gsl_rng_seed(seed)
    #        #print '*** seed=',seed
    #
    #    else: # XXX debug
    #        seed = random.randint(1,9000000)
    #        mixextend.set_gsl_rng_seed(seed)
    #        random.seed(seed)
    #        #print '*** seed=',seed

    #M = 4  # Alphabet size for discrete distributions

    min_sigma = 0.1  # minimal std for Normal
    max_sigma = 1.0  # maximal std for Normal
    min_mu = -5.0  # minimal mean
    max_mu = 8.0  # maximal mean

    if dtypes == 'disc':
        featureTypes = [0] * p
    elif dtypes == 'gauss':
        featureTypes = [1] * p
    elif dtypes == 'discgauss':
        # discrete or Normal features for now, chosen uniformly
        # 0 discrete, 1 Normal
        featureTypes = [random.choice((0, 1)) for i in range(p)]
    else:
        raise TypeError("unknown dtypes value: %s" % str(dtypes))

    #print featureTypes

    C = []
    for j in range(p):
        c_j = []
        for i in range(G):
            #print i,j
            if featureTypes[j] == 0:
                acc = 0
                while acc == 0:
                    cand = DiscreteDistribution(M, random_vector(M))

                    #print 'cand:',cand

                    acc = 1

                    for d in c_j:
                        KL_dist = sym_kl_dist(d, cand)
                        if KL_dist > KL_upper or KL_dist < KL_lower:
                            #print '  *', cand, 'rejected:', d , KL_dist
                            acc = 0
                            break

                c_j.append(cand)
            elif featureTypes[j] == 1:
                acc = 0
                while acc == 0:
                    mu = random.uniform(min_mu, max_mu)
                    sigma = random.uniform(min_sigma, max_sigma)

                    cand = NormalDistribution(mu, sigma)

                    #print 'cand:',cand

                    acc = 1

                    for d in c_j:
                        KL_dist = sym_kl_dist(d, cand)
                        if KL_dist > KL_upper or KL_dist < KL_lower:
                            #print '  *', cand, 'rejected:', d , KL_dist
                            acc = 0
                            break

                c_j.append(cand)

            else:
                raise RuntimeError("invalid feature type: %s" % str(featureTypes[j]))

        C.append(c_j)


#    print '\n'
#    for cc in C:
#        print cc

    comps = []
    for i in range(G):
        comps.append(ProductDistribution([C[j][i] for j in range(p)]))

    pi = get_random_pi(G, 0.1)

    m = MixtureModel(G, pi, comps, struct=1)
    m.updateFreeParams()

    return m
Example #3
mix3 = MixtureModel(
    2, [0.5, 0.5],
    [NormalDistribution(1.0, 1.0),
     NormalDistribution(0.5, 1.0)],
    compFix=[0, 2])
mix4 = MixtureModel(
    2, [0.1, 0.9],
    [NormalDistribution(2.0, 1.0),
     NormalDistribution(1.5, 1.0)],
    compFix=[0, 2])
pd2 = ProductDistribution([mix3, mix4, mult2])

m7 = MixtureModel(2, [0.4, 0.6], [pd1, pd2])

seq6 = m7.sampleSet(80)

p = random_vector(4)
mult3 = MultinomialDistribution(3, 4, p)
mix5 = MixtureModel(
    2, [0.3, 0.7],
    [NormalDistribution(-1.0, 1.0),
     NormalDistribution(-2.0, 1.0)],
    compFix=[0, 2])
mix6 = MixtureModel(
    2, [0.8, 0.2],
    [NormalDistribution(-2.0, 1.0),
     NormalDistribution(2.0, 1.0)],
    compFix=[0, 2])
pd3 = ProductDistribution([mix5, mix6, mult3])

p = random_vector(4)
mult4 = MultinomialDistribution(3, 4, p)
Example #4
G = 4  # number of mixture components; value assumed for illustration (G is defined elsewhere in the original script)
piPrior = DirichletDistribution(G, [1.0] * G)

compPrior = []
for i in range(2):
    compPrior.append(DirichletDistribution(4, [1.02, 1.02, 1.02, 1.02]))
for i in range(2):
    compPrior.append(NormalGammaDistribution(1.0, 2.0, 3.0, 4.0))

mixPrior = MixturePrior(0.7, 0.7, piPrior, compPrior)

DNA = Alphabet(['A', 'C', 'G', 'T'])
comps = []
for i in range(G):
    dlist = []
    for j in range(2):
        phi = random_vector(4)
        dlist.append(DiscreteDistribution(4, phi, DNA))
    for j in range(2):
        mu = j + 1.0
        sigma = j + 0.5
        dlist.append(NormalDistribution(mu, sigma))

    comps.append(ProductDistribution(dlist))
pi = random_vector(G)

m = BayesMixtureModel(G, pi, comps, mixPrior, struct=1)

mixture.writeMixture(m, 'test.bmix')

m2 = mixture.readMixture('test.bmix')
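
A small follow-up check one might append to Example #4: since the model is written to 'test.bmix' and read back, printing both objects should show matching parameters. This only assumes the model classes define a readable string representation, as the commented-out print statements in the other examples suggest.

# Round-trip check: the model re-read from 'test.bmix' should reproduce the original.
print(m)   # original BayesMixtureModel
print(m2)  # re-read copy; the two printouts should agree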
Example #5
mult1 = MultinomialDistribution(3, 4, [0.25, 0.25, 0.25, 0.25])
mix1 = MixtureModel(2, [0.3, 0.7], [NormalDistribution(-2.0, 1.0), NormalDistribution(-2.0, 1.0)], compFix=[0, 2])
mix2 = MixtureModel(2, [0.8, 0.2], [NormalDistribution(-3.0, 1.0), NormalDistribution(3.0, 1.0)], compFix=[0, 2])
pd1 = ProductDistribution([mix1, mix2, mult1])

mult2 = MultinomialDistribution(3, 4, [0.2, 0.1, 0.5, 0.2])
mix3 = MixtureModel(2, [0.5, 0.5], [NormalDistribution(1.0, 1.0), NormalDistribution(0.5, 1.0)], compFix=[0, 2])
mix4 = MixtureModel(2, [0.1, 0.9], [NormalDistribution(2.0, 1.0), NormalDistribution(1.5, 1.0)], compFix=[0, 2])
pd2 = ProductDistribution([mix3, mix4, mult2])

m7 = MixtureModel(2, [0.4, 0.6], [pd1, pd2])

seq6 = m7.sampleSet(80)

p = random_vector(4)
mult3 = MultinomialDistribution(3, 4, p)
mix5 = MixtureModel(2, [0.3, 0.7], [NormalDistribution(-1.0, 1.0), NormalDistribution(-2.0, 1.0)], compFix=[0, 2])
mix6 = MixtureModel(2, [0.8, 0.2], [NormalDistribution(-2.0, 1.0), NormalDistribution(2.0, 1.0)], compFix=[0, 2])
pd3 = ProductDistribution([mix5, mix6, mult3])

p = random_vector(4)
mult4 = MultinomialDistribution(3, 4, p)
mix7 = MixtureModel(2, [0.5, 0.5], [NormalDistribution(-1.0, 1.0), NormalDistribution(-3.0, 1.0)], compFix=[0, 2])
mix8 = MixtureModel(2, [0.1, 0.9], [NormalDistribution(2.5, 1.0), NormalDistribution(0.5, 1.0)], compFix=[0, 2])
pd4 = ProductDistribution([mix7, mix8, mult4])

m8 = MixtureModel(2, [0.5, 0.5], [pd3, pd4])
m8.randParams(seq6)

m8.EM(seq6, 15, 0.3)
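
After the EM run above, the fitted mixture can be used to assign each sample to a component. The sketch below assumes the posterior-based classify method that PyMix's MixtureModel provides; the exact signature may differ between versions, so treat it as illustrative.

# Posterior-based cluster assignment for the training data (classify is assumed to
# return one component index per sample in seq6).
labels = m8.classify(seq6)
print(labels)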