Example #1
0
def scoreStructureLearning(N, gen, delta, seed=None, silent=False, skipAfterRNGcalls = False):
    """
    If skipAfterRNGcalls is True, the function returns as soon as all calls to the random number generators (RNGs) have been made.
    """


    #print 'start scoring'

#    if seed != None:
#        random.seed(seed)
#        mixextend.set_gsl_rng_seed(seed)
#        print '*** given seed=',seed
#
#    else: # XXX debug
#        seed = random.randint(1,999999999)
#        random.seed(seed)
#        mixextend.set_gsl_rng_seed(seed)
#        print '*** random seed=',seed



    data = gen.sampleDataSet(N)



    # XXX update NormalGammaPrior hyperparameters
    for j in range(gen.dist_nr):
        if isinstance(gen.prior.compPrior[j], NormalGammaPrior):
            gen.prior.compPrior[j].setParams(data.getInternalFeature(j), gen.G)

    gen.prior.structPriorHeuristic(delta, data.N)

    print '\nupdating generating model structure:'
    print 'before:'
    print gen.leaders
    print gen.groups


    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(gen, data, silent=1)

    print '\nafter:'
    print gen.leaders
    print gen.groups


    if not silent:
        printModel(gen,'generating model')


    m = copy.copy(gen)
    # reset structure
    m.initStructure()

    # training parameters
    nr_rep = 40 # XXX
    #nr_rep = 4 # XXX


    nr_steps = 400
    em_delta = 0.6

    print 'start training'
    print 'EM repeats:',nr_rep

    m.randMaxTraining(data,nr_rep, nr_steps,em_delta,silent=1,rtype=0)
    print 'finished training'

    if skipAfterRNGcalls:
        print '*** Skipping!'
        return np.zeros(4)



#    # check for consistency of component indices (identifiability issues)
#    bad = 0
    if not silent:
        cmap = {}

        for j in range(gen.dist_nr):
            print '\nfeature:',j

            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j], gen.components[i2][j])
                print i1,'->', kldists.argmin(), map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()


#        for i1 in range(m.G):
#            print
#            cdists = np.zeros(m.G)
#            for i2 in range(m.G):
#                cdists[i2] = product_distribution_sym_kl_dist(m.components[i1], gen.components[i2])
#                #print i1,i2,product_distribution_sym_kl_dist(m.components[i1], gen.components[i2])
#
#            print i1,'maps to', np.argmin(cdists), cdists.tolist()
#            amin = np.argmin(cdists)
#            if not amin == i1:     # minimal KL distance should occur at equal indices in gen and m
#                bad = 1
#                cmap[i1] = amin

#    if bad:
#
#
#
#        # XXX check whether cmap defines new unambiguous ordering
#
#        # check whether components have switched positions
#        reorder = 0
#        order = range(m.G)
#        try:
#
#            #print cmap
#
#            for i1 in cmap.keys():
#                order[i1] = cmap[i1]
#
#            #print order
#            #print set(order)
#            #print  list(set(order))
#
#            if len(set(order)) == m.G:
#                reorder = 1
#        except KeyError:
#            pass
#        except AssertionError:
#            pass
#
#        if reorder:
#            print '** new order', order
#
#            m.reorderComponents(order)
#
#        else:
#
#
#            #print cdists
#            print i1,'maps to', np.argmin(cdists)
#
#            print 'Failed matching.'
#
#            print 'generating model gen:'
#            print gen
#
#            print 'trained model m:'
#            print m
#
#            raise ValueError


#    mtest =copy.copy(gen)
#    ch = mtest.updateStructureBayesian(data,silent=1)
#    print '\nTEST:',ch
#    for j in range(m.dist_nr):
#        print j,mtest.leaders[j], mtest.groups[j]


    #print m.prior

    print '-----------------------------------------------------------------------'
    print '\n True structure:'
    print 'True model post:',mixture.get_loglikelihood(gen, data) + gen.prior.pdf(gen)
    #for j in range(m.dist_nr):
    #    print j,gen.leaders[j], gen.groups[j]
    print gen.leaders
    print gen.groups

    if not silent:
        printModel(m,'trained model')

    m1 = copy.copy(m)
    t0 = time.time()
    #print '\n\n################# TOPDOWN #####################'
    m1.updateStructureBayesian(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m1.mapEM(data,40,0.1)
    print '\nTop down (',str(time2),'s ):'
    print m1.leaders
    print m1.groups
    print 'Top down model post:',mixture.get_loglikelihood(m1, data) + m1.prior.pdf(m1)
#    print 'Accuracy:',mixture.structureAccuracy(gen,m1)  # structureEditDistance(gen,m1)

    if not silent:
        printModel(m1,'top down model')


    #print '#############################'


    #print '\n\n################# FULL FixedOrder #####################'
    m2 = copy.copy(m)
    t0 = time.time()
    m2.updateStructureBayesianFullEnumerationFixedOrder(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m2.mapEM(data,40,0.1)

#    print
#    for j in range(m2.dist_nr):
#        print j,m2.leaders[j], m2.groups[j]
    print '\nFull enumeration Fixed Order  (',str(time2),'s ):'
    print m2.leaders
    print m2.groups
    print 'Full fixed order model post:',mixture.get_loglikelihood(m2, data) + m2.prior.pdf(m2)
#    print 'Accuracy:',mixture.structureAccuracy(gen,m2) # structureEditDistance(gen,m1)


    if not silent:
        printModel(m2,'full fixed model')




    #print '\n\n################# BOTTOM UP #####################'
    m3 = copy.copy(m)
    t0 = time.time()
    m3.updateStructureBayesianBottomUp(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m3.mapEM(data,40,0.1)
#    print
#    for j in range(m3.dist_nr):
#        print j,m3.leaders[j], m3.groups[j]
    print '\nBottom up (',str(time2),'s ):'
    print m3.leaders
    print m3.groups
    print 'Bottom up model post:',mixture.get_loglikelihood(m3, data) + m3.prior.pdf(m3)
#    print 'Accuracy:',mixture.structureAccuracy(gen,m3) # structureEditDistance(gen,m1)


    if not silent:
        printModel(m3,'bottom up model')


    #print '\n\n################# FULL enumeration #####################'
    m4 = copy.copy(m)
    t0 = time.time()
    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(m4, data, silent=0)
    t1 = time.time()
    time2 = t1-t0
   # m4.mapEM(data,40,0.1)
#    print
#    for j in range(m4.dist_nr):
#        print j,m4.leaders[j], m4.groups[j]
    print '\nFull enumeration (',str(time2),'s ):'
    print m4.leaders
    print m4.groups
    print 'Full enumeration model post:',mixture.get_loglikelihood(m4, data) + m4.prior.pdf(m4)
#    print 'Accuracy:',mixture.structureAccuracy(gen,m4)

    if not silent:
        printModel(m4,'full enumeration model')


    print '-----------------------------------------------------------------------'



#    dtop = structureAccuracy(gen,m1)
#    dfull_fixed = structureAccuracy(gen,m2)
#    dfull = structureAccuracy(gen,m4)
#    dbottom = structureAccuracy(gen,m3)

    logp_top = get_loglikelihood(m1, data) + m1.prior.pdf(m1)
    logp_full_fixed = get_loglikelihood(m2, data) + m2.prior.pdf(m2)
    logp_full = get_loglikelihood(m4, data) + m4.prior.pdf(m4)
    logp_bottom = get_loglikelihood(m3, data) + m3.prior.pdf(m3)


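    # Sanity check: full enumeration searches the entire structure space, so its
    # posterior must upper-bound every greedy variant; rounding to three decimals
    # absorbs floating point noise.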
    if (not (round(logp_top,3) <= round(logp_full,3)) or not (round(logp_full_fixed,3) <= round(logp_full,3))
        or not (round(logp_bottom,3) <= round(logp_full,3)) ):
        raise ValueError('full enumeration posterior is not maximal')


    return np.array([ logp_top, logp_full_fixed, logp_full, logp_bottom ])
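
For orientation, a minimal driver for this routine might look like the sketch
below. It is hypothetical: the construction of the generating model gen assumes
a PyMix-style API (mixture.NormalDistribution, mixture.ProductDistribution,
mixture.MixtureModel, a prior built from NormalGammaPrior components), and the
exact constructor signatures may differ from the actual library.

# Hypothetical usage sketch (not part of the original module); the constructor
# signatures below are assumptions about a PyMix-style API.
import mixture

# two well-separated Gaussian components over a single feature
c0 = mixture.ProductDistribution([mixture.NormalDistribution(0.0, 1.0)])
c1 = mixture.ProductDistribution([mixture.NormalDistribution(3.0, 1.0)])
gen = mixture.MixtureModel(2, [0.5, 0.5], [c0, c1], struct=1)
# gen.prior must additionally be set to a mixture prior whose compPrior
# entries are NormalGammaPrior objects (construction omitted here).

scores = scoreStructureLearning(500, gen, delta=0.1, silent=True)
# scores holds [logp_top, logp_full_fixed, logp_full, logp_bottom]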
Example #2
0
def scoreStructureLearning(N,
                           gen,
                           delta,
                           seed=None,
                           silent=False,
                           skipAfterRNGcalls=False):
    """
    If skipAfterRNGcalls is True, the function returns as soon as all calls to the random number generators (RNGs) have been made.
    """

    #print 'start scoring'

    #    if seed != None:
    #        random.seed(seed)
    #        mixextend.set_gsl_rng_seed(seed)
    #        print '*** given seed=',seed
    #
    #    else: # XXX debug
    #        seed = random.randint(1,999999999)
    #        random.seed(seed)
    #        mixextend.set_gsl_rng_seed(seed)
    #        print '*** random seed=',seed

    data = gen.sampleDataSet(N)

    # XXX update NormalGammaPrior hyperparameters
    for j in range(gen.dist_nr):
        if isinstance(gen.prior.compPrior[j], NormalGammaPrior):
            gen.prior.compPrior[j].setParams(data.getInternalFeature(j), gen.G)

    gen.prior.structPriorHeuristic(delta, data.N)

    print '\nupdating generating model structure:'
    print 'before:'
    print gen.leaders
    print gen.groups

    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(gen,
                                                                     data,
                                                                     silent=1)

    print '\nafter:'
    print gen.leaders
    print gen.groups

    if not silent:
        printModel(gen, 'generating model')

    m = copy.copy(gen)
    # reset structure
    m.initStructure()

    # training parameters
    nr_rep = 40  # XXX
    #nr_rep = 4 # XXX

    nr_steps = 400
    em_delta = 0.6

    print 'start training'
    print 'EM repeats:', nr_rep

    m.randMaxTraining(data, nr_rep, nr_steps, em_delta, silent=1, rtype=0)
    print 'finished training'

    if skipAfterRNGcalls:
        print '*** Skipping!'
        return np.zeros(4)

#    # check for consistency of component indices (identifiability issues)
#    bad = 0
    if not silent:
        cmap = {}

        for j in range(gen.dist_nr):
            print '\nfeature:', j

            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j],
                                              gen.components[i2][j])
                print i1, '->', kldists.argmin(), map(
                    lambda x: '%.2f' % float(x), kldists)  # kldists.min()

#        for i1 in range(m.G):
#            print
#            cdists = np.zeros(m.G)
#            for i2 in range(m.G):
#                cdists[i2] = product_distribution_sym_kl_dist(m.components[i1], gen.components[i2])
#                #print i1,i2,product_distribution_sym_kl_dist(m.components[i1], gen.components[i2])
#
#            print i1,'maps to', np.argmin(cdists), cdists.tolist()
#            amin = np.argmin(cdists)
#            if not amin == i1:     # minimal KL distance should occur at equal indices in gen and m
#                bad = 1
#                cmap[i1] = amin

#    if bad:
#
#
#
#        # XXX check whether cmap defines new unambiguous ordering
#
#        # check whether components have switched positions
#        reorder = 0
#        order = range(m.G)
#        try:
#
#            #print cmap
#
#            for i1 in cmap.keys():
#                order[i1] = cmap[i1]
#
#            #print order
#            #print set(order)
#            #print  list(set(order))
#
#            if len(set(order)) == m.G:
#                reorder = 1
#        except KeyError:
#            pass
#        except AssertionError:
#            pass
#
#        if reorder:
#            print '** new order', order
#
#            m.reorderComponents(order)
#
#        else:
#
#
#            #print cdists
#            print i1,'maps to', np.argmin(cdists)
#
#            print 'Failed matching.'
#
#            print 'generating model gen:'
#            print gen
#
#            print 'trained model m:'
#            print m
#
#            raise ValueError

#    mtest =copy.copy(gen)
#    ch = mtest.updateStructureBayesian(data,silent=1)
#    print '\nTEST:',ch
#    for j in range(m.dist_nr):
#        print j,mtest.leaders[j], mtest.groups[j]

    #print m.prior

    print '-----------------------------------------------------------------------'
    print '\n True structure:'
    print 'True model post:', mixture.get_loglikelihood(
        gen, data) + gen.prior.pdf(gen)
    #for j in range(m.dist_nr):
    #    print j,gen.leaders[j], gen.groups[j]
    print gen.leaders
    print gen.groups

    if not silent:
        printModel(m, 'trained model')

    m1 = copy.copy(m)
    t0 = time.time()
    #print '\n\n################# TOPDOWN #####################'
    m1.updateStructureBayesian(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m1.mapEM(data,40,0.1)
    print '\nTop down (', str(time2), 's ):'
    print m1.leaders
    print m1.groups
    print 'Top down model post:', mixture.get_loglikelihood(
        m1, data) + m1.prior.pdf(m1)
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m1)  # structureEditDistance(gen,m1)

    if not silent:
        printModel(m1, 'top down model')

    #print '#############################'

    #print '\n\n################# FULL FixedOrder #####################'
    m2 = copy.copy(m)
    t0 = time.time()
    m2.updateStructureBayesianFullEnumerationFixedOrder(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m2.mapEM(data,40,0.1)

    #    print
    #    for j in range(m2.dist_nr):
    #        print j,m2.leaders[j], m2.groups[j]
    print '\nFull enumeration Fixed Order  (', str(time2), 's ):'
    print m2.leaders
    print m2.groups
    print 'Full fixed order model post:', mixture.get_loglikelihood(
        m2, data) + m2.prior.pdf(m2)
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m2) # structureEditDistance(gen,m1)

    if not silent:
        printModel(m2, 'full fixed model')

    #print '\n\n################# BOTTOM UP #####################'
    m3 = copy.copy(m)
    t0 = time.time()
    m3.updateStructureBayesianBottomUp(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m3.mapEM(data,40,0.1)
    #    print
    #    for j in range(m3.dist_nr):
    #        print j,m3.leaders[j], m3.groups[j]
    print '\nBottom up (', str(time2), 's ):'
    print m3.leaders
    print m3.groups
    print 'Bottom up model post:', mixture.get_loglikelihood(
        m3, data) + m3.prior.pdf(m3)
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m3) # structureEditDistance(gen,m1)

    if not silent:
        printModel(m3, 'bottom up model')

    #print '\n\n################# FULL enumeration #####################'
    m4 = copy.copy(m)
    t0 = time.time()
    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(m4,
                                                                     data,
                                                                     silent=0)
    t1 = time.time()
    time2 = t1 - t0
    # m4.mapEM(data,40,0.1)
    #    print
    #    for j in range(m4.dist_nr):
    #        print j,m4.leaders[j], m4.groups[j]
    print '\nFull enumeration (', str(time2), 's ):'
    print m4.leaders
    print m4.groups
    print 'Full enumeration model post:', mixture.get_loglikelihood(
        m4, data) + m4.prior.pdf(m4)
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m4)

    if not silent:
        printModel(m4, 'full enumeration model')

    print '-----------------------------------------------------------------------'

    #    dtop = structureAccuracy(gen,m1)
    #    dfull_fixed = structureAccuracy(gen,m2)
    #    dfull = structureAccuracy(gen,m4)
    #    dbottom = structureAccuracy(gen,m3)

    logp_top = get_loglikelihood(m1, data) + m1.prior.pdf(m1)
    logp_full_fixed = get_loglikelihood(m2, data) + m2.prior.pdf(m2)
    logp_full = get_loglikelihood(m4, data) + m4.prior.pdf(m4)
    logp_bottom = get_loglikelihood(m3, data) + m3.prior.pdf(m3)

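    # Sanity check: full enumeration searches the entire structure space, so its
    # posterior must upper-bound every greedy variant; rounding to three decimals
    # absorbs floating point noise.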
    if (not (round(logp_top, 3) <= round(logp_full, 3))
            or not (round(logp_full_fixed, 3) <= round(logp_full, 3))
            or not (round(logp_bottom, 3) <= round(logp_full, 3))):
        raise ValueError('full enumeration posterior is not maximal')

    return np.array([logp_top, logp_full_fixed, logp_full, logp_bottom])
Example #3
0
def scoreStructureLearning_diffFullVsTopdown(N, gen, delta, seed=None, silent=False, skipAfterRNGcalls = False):
    """
    If skipAfterRNGcalls is True, the function returns as soon as all calls to the random number generators (RNGs) have been made.
    """


    #print 'start scoring'

#    if seed != None:
#        random.seed(seed)
#        mixextend.set_gsl_rng_seed(seed)
#        print '*** given seed=',seed
#
#    else: # XXX debug
#        seed = random.randint(1,999999999)
#        random.seed(seed)
#        mixextend.set_gsl_rng_seed(seed)
#        print '*** random seed=',seed



    data = gen.sampleDataSet(N)



    # XXX update NormalGammaPrior hyperparameters
    for j in range(gen.dist_nr):
        if isinstance(gen.prior.compPrior[j], NormalGammaPrior):
            gen.prior.compPrior[j].setParams(data.getInternalFeature(j), gen.G)

    gen.prior.structPriorHeuristic(delta, data.N)

#    print '\nupdating generating model structure:'
#    print 'before:'
#    print gen.leaders
#    print gen.groups


    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(gen, data, silent=1)

#    print '\nafter:'
#    print gen.leaders
#    print gen.groups




    m = copy.copy(gen)
    # reset structure
    m.initStructure()

    # training parameters
    nr_rep = 40 # XXX
    #nr_rep = 4 # XXX


    nr_steps = 400
    em_delta = 0.6

#    print 'start training'
#    print 'EM repeats:',nr_rep

    m.randMaxTraining(data,nr_rep, nr_steps,em_delta,silent=1,rtype=0)
#    print 'finished training'

    if skipAfterRNGcalls:
        print '*** Skipping!'
        return np.zeros(4)


    m1 = copy.copy(m)
    t0 = time.time()
    #print '\n\n################# TOPDOWN #####################'
    m1.updateStructureBayesian(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m1.mapEM(data,40,0.1)
#    print 'Accuracy:',mixture.structureAccuracy(gen,m1)  # structureEditDistance(gen,m1)



    #print '#############################'


    #print '\n\n################# FULL FixedOrder #####################'
    m2 = copy.copy(m)
    t0 = time.time()
    m2.updateStructureBayesianFullEnumerationFixedOrder(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m2.mapEM(data,40,0.1)

#    print
#    for j in range(m2.dist_nr):
#        print j,m2.leaders[j], m2.groups[j]
#    print 'Accuracy:',mixture.structureAccuracy(gen,m2) # structureEditDistance(gen,m1)





    #print '\n\n################# BOTTOM UP #####################'
    m3 = copy.copy(m)
    t0 = time.time()
    m3.updateStructureBayesianBottomUp(data,silent=1)
    t1 = time.time()
    time2 = t1-t0
    #m3.mapEM(data,40,0.1)
#    print
#    for j in range(m3.dist_nr):
#        print j,m3.leaders[j], m3.groups[j]
#    print 'Accuracy:',mixture.structureAccuracy(gen,m3) # structureEditDistance(gen,m1)




    #print '\n\n################# FULL enumeration #####################'
    m4 = copy.copy(m)
    t0 = time.time()
    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(m4, data, silent=1)
    t1 = time.time()
    time2 = t1-t0
   # m4.mapEM(data,40,0.1)
#    print
#    for j in range(m4.dist_nr):
#        print j,m4.leaders[j], m4.groups[j]
#    print 'Accuracy:',mixture.structureAccuracy(gen,m4)


    logp_top = get_loglikelihood(m1, data) + m1.prior.pdf(m1)
    logp_full_fixed = get_loglikelihood(m2, data) + m2.prior.pdf(m2)
    logp_full = get_loglikelihood(m4, data) + m4.prior.pdf(m4)
    logp_bottom = get_loglikelihood(m3, data) + m3.prior.pdf(m3)


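    # Sanity check: the full enumeration posterior must upper-bound the greedy
    # variants (up to rounding); otherwise dump the models and fail.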
    if (not (round(logp_top,3) <= round(logp_full,3) ) or not (round(logp_full_fixed,3) <= round(logp_full,3))
        or not (round(logp_bottom,3) <= round(logp_full,3)) ):
        print 'ERROR:'
        print 'top:',logp_top
        print 'full fixed:',logp_full_fixed
        print 'full:',logp_full
        print 'bottom:',logp_bottom,'\n'

        printModel(gen,'generating model')
        printStructure(gen)
        print
        printModel(m4,'full enumeration model')
        printStructure(m4)
        print
        printModel(m2,'fixed full model')
        printStructure(m2)

        raise ValueError('full enumeration posterior is not maximal')

#    # as a measure of separation of the component in the trained model, sum up
#    # sym. KL divergence of all components and features
#    train_diff = 0
#    for j in range(gen.dist_nr):
#        for i1 in range(m.G):
#            for i2 in range(m.G):
#                train_diff += sym_kl_dist(m.components[i1][j], m.components[i2][j])

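    # KL-based distances between the generating and trained mixture densities,
    # computed in both directions since the measures are not symmetric.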
    mix_dist1 = mixtureKLdistance(gen, m)
    mix_dist2 = mixtureKLdistance(m, gen)

    max_dist1 = mixtureMaxKLdistance(gen, m)
    max_dist2 = mixtureMaxKLdistance(m, gen)

    # number of leaders in the full enumeration model
    nr_full_lead = 0
    for ll in m4.leaders:
        nr_full_lead += len(ll)

    match = matchModelStructures(gen, m)

    compred = checkComponentRedundancy(gen.leaders, gen.groups)
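    # The search variants should all recover the same posterior; compare the
    # printed string representations so that scores count as equal only when
    # they agree exactly.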
    if not (str(logp_top) == str(logp_full_fixed) == str(logp_full)):

        print '-----------------------------------------------------------------------'

        print 'Different:'
        print 'top:',logp_top
        print 'full fixed:',logp_full_fixed
        print 'full:',logp_full
        print 'bottom:',logp_bottom,'\n'

        explain = 0
        if str(compred) != '[]':
            print '*** redundant components',compred
            explain = 1
        if gen.pi.min() < 0.05:
            print '*** vanishing component in generating model'
            explain = 1
        if m.pi.min() < 0.05:
            print '*** vanishing component in trained model'
            explain = 1

        if explain == 0:
            print '*** UNEXPLAINED !'


        printModel(gen,'generating model')
        printModel(m,'trained model')
        #print 'Trained model diff (simplistic):',train_diff
        print 'D: Mixture distance gen/trained:',mix_dist1
        print 'D: Mixture distance trained/gen:',mix_dist2

        print 'D: Mixture Max-distance gen/trained:',max_dist1
        print 'D: Mixture Max-distance trained/gen:',max_dist2


        print '\nGenerating distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(gen.components[i1][j], gen.components[i2][j])
                print map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()

        print '\nTrained distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j], m.components[i2][j])
                print map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()


        print '\nTrained distances to generating:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j], gen.components[i2][j])
                print i1,'->', kldists.argmin(), map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()



        print '\n True structure:'
        print 'True model post:',mixture.get_loglikelihood(gen, data) + gen.prior.pdf(gen)
        #for j in range(m.dist_nr):
        #    print j,gen.leaders[j], gen.groups[j]
        printStructure(gen)

        print '\nTop down:'
        printStructure(m1)
        print 'Top down model post:',mixture.get_loglikelihood(m1, data) + m1.prior.pdf(m1)
        printModel(m1,'top down model')

        print '\nFull enumeration Fixed Order:'
        printStructure(m2)
        print 'Full fixed order model post:',mixture.get_loglikelihood(m2, data) + m2.prior.pdf(m2)
        printModel(m2,'full fixed model')

        print '\nBottom up:'
        printStructure(m3)
        print 'Bottom up model post:',mixture.get_loglikelihood(m3, data) + m3.prior.pdf(m3)
        printModel(m3,'bottom up model')

        print '\nFull enumeration:'
        printStructure(m4)
        print 'Full enumeration model post:',mixture.get_loglikelihood(m4, data) + m4.prior.pdf(m4)
        printModel(m4,'full enumeration model')


        print '-----------------------------------------------------------------------'

    elif str(compred) != '[]' and nr_full_lead > m4.p and match != 1:  # redundant components and not fully merged
        print '-----------------------------------------------------------------------'
        print 'Same but redundant components:', compred



        printModel(gen,'generating model')
        printModel(m,'trained model')
        #print 'Trained model diff:',train_diff
        print 'S: Mixture distance gen/trained:',mix_dist1
        print 'S: Mixture distance trained/gen:',mix_dist2

        print 'S: Mixture Max-distance gen/trained:',max_dist1
        print 'S: Mixture Max-distance trained/gen:',max_dist2


        print '\nGenerating distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(gen.components[i1][j], gen.components[i2][j])
                print i1,':', map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()

        print '\nTrained distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j], m.components[i2][j])
                print i1,':', map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()


        print '\nTrained distances to generating:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:',j

            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j], gen.components[i2][j])
                print i1,'->', kldists.argmin(), map(lambda x:'%.2f' % float(x),kldists)     # kldists.min()



        print '\n True structure:'
        print 'True model post:',mixture.get_loglikelihood(gen, data) + gen.prior.pdf(gen)
        #for j in range(m.dist_nr):
        #    print j,gen.leaders[j], gen.groups[j]
        printStructure(gen)


        print '\nTop down:'
        printStructure(m1)
        print 'Top down model post:',mixture.get_loglikelihood(m1, data) + m1.prior.pdf(m1)

        print '\nFull enumeration Fixed Order:'
        printStructure(m2)
        print 'Full fixed order model post:',mixture.get_loglikelihood(m2, data) + m2.prior.pdf(m2)

        print '\nBottom up:'
        printStructure(m3)
        print 'Bottom up model post:',mixture.get_loglikelihood(m3, data) + m3.prior.pdf(m3)

        print '\nFull enumeration:'
        printStructure(m4)
        print 'Full enumeration model post:',mixture.get_loglikelihood(m4, data) + m4.prior.pdf(m4)

        print '-----------------------------------------------------------------------'

#    else:
#        print '-----------------------------------------------------------------------'
#        print 'S: Mixture distance gen/trained:',mix_dist1
#        print 'S: Mixture distance trained/gen:',mix_dist2
#        print '-----------------------------------------------------------------------'


#    else:
#        print '** all equal.'


#    dtop = structureAccuracy(gen,m1)
#    dfull_fixed = structureAccuracy(gen,m2)
#    dfull = structureAccuracy(gen,m4)
#    dbottom = structureAccuracy(gen,m3)



    return np.array([ logp_top, logp_full_fixed, logp_full, logp_bottom ])
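
As with scoreStructureLearning above, a hypothetical invocation (reusing a gen
built as in the earlier sketch) would be:

scores = scoreStructureLearning_diffFullVsTopdown(500, gen, delta=0.1)
# diagnostics are printed only when the search variants disagree or the
# generating structure contains redundant components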
Example #4
0
def scoreStructureLearning_diffFullVsTopdown(N,
                                             gen,
                                             delta,
                                             seed=None,
                                             silent=False,
                                             skipAfterRNGcalls=False):
    """
    If skipAfterRNGcalls is True, the function returns as soon as all calls to the random number generators (RNGs) have been made.
    """

    #print 'start scoring'

    #    if seed != None:
    #        random.seed(seed)
    #        mixextend.set_gsl_rng_seed(seed)
    #        print '*** given seed=',seed
    #
    #    else: # XXX debug
    #        seed = random.randint(1,999999999)
    #        random.seed(seed)
    #        mixextend.set_gsl_rng_seed(seed)
    #        print '*** random seed=',seed

    data = gen.sampleDataSet(N)

    # XXX update NormalGammaPrior hyperparameters
    for j in range(gen.dist_nr):
        if isinstance(gen.prior.compPrior[j], NormalGammaPrior):
            gen.prior.compPrior[j].setParams(data.getInternalFeature(j), gen.G)

    gen.prior.structPriorHeuristic(delta, data.N)

    #    print '\nupdating generating model structure:'
    #    print 'before:'
    #    print gen.leaders
    #    print gen.groups

    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(gen,
                                                                     data,
                                                                     silent=1)

    #    print '\nafter:'
    #    print gen.leaders
    #    print gen.groups

    m = copy.copy(gen)
    # reset structure
    m.initStructure()

    # training parameters
    nr_rep = 40  # XXX
    #nr_rep = 4 # XXX

    nr_steps = 400
    em_delta = 0.6

    #    print 'start training'
    #    print 'EM repeats:',nr_rep

    m.randMaxTraining(data, nr_rep, nr_steps, em_delta, silent=1, rtype=0)
    #    print 'finished training'

    if skipAfterRNGcalls:
        print '*** Skipping!'
        return np.zeros(4)

    m1 = copy.copy(m)
    t0 = time.time()
    #print '\n\n################# TOPDOWN #####################'
    m1.updateStructureBayesian(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m1.mapEM(data,40,0.1)
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m1)  # structureEditDistance(gen,m1)

    #print '#############################'

    #print '\n\n################# FULL FixedOrder #####################'
    m2 = copy.copy(m)
    t0 = time.time()
    m2.updateStructureBayesianFullEnumerationFixedOrder(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m2.mapEM(data,40,0.1)

    #    print
    #    for j in range(m2.dist_nr):
    #        print j,m2.leaders[j], m2.groups[j]
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m2) # structureEditDistance(gen,m1)

    #print '\n\n################# BOTTOM UP #####################'
    m3 = copy.copy(m)
    t0 = time.time()
    m3.updateStructureBayesianBottomUp(data, silent=1)
    t1 = time.time()
    time2 = t1 - t0
    #m3.mapEM(data,40,0.1)
    #    print
    #    for j in range(m3.dist_nr):
    #        print j,m3.leaders[j], m3.groups[j]
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m3) # structureEditDistance(gen,m1)

    #print '\n\n################# FULL enumeration #####################'
    m4 = copy.copy(m)
    t0 = time.time()
    fullEnumerationExhaustive.updateStructureBayesianFullEnumeration(m4,
                                                                     data,
                                                                     silent=1)
    t1 = time.time()
    time2 = t1 - t0
    # m4.mapEM(data,40,0.1)
    #    print
    #    for j in range(m4.dist_nr):
    #        print j,m4.leaders[j], m4.groups[j]
    #    print 'Accuracy:',mixture.structureAccuracy(gen,m4)

    logp_top = get_loglikelihood(m1, data) + m1.prior.pdf(m1)
    logp_full_fixed = get_loglikelihood(m2, data) + m2.prior.pdf(m2)
    logp_full = get_loglikelihood(m4, data) + m4.prior.pdf(m4)
    logp_bottom = get_loglikelihood(m3, data) + m3.prior.pdf(m3)

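    # Sanity check: the full enumeration posterior must upper-bound the greedy
    # variants (up to rounding); otherwise dump the models and fail.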
    if (not (round(logp_top, 3) <= round(logp_full, 3))
            or not (round(logp_full_fixed, 3) <= round(logp_full, 3))
            or not (round(logp_bottom, 3) <= round(logp_full, 3))):
        print 'ERROR:'
        print 'top:', logp_top
        print 'full fixed:', logp_full_fixed
        print 'full:', logp_full
        print 'bottom:', logp_bottom, '\n'

        printModel(gen, 'generating model')
        printStructure(gen)
        print
        printModel(m4, 'full enumeration model')
        printStructure(m4)
        print
        printModel(m2, 'fixed full model')
        printStructure(m2)

        raise ValueError('full enumeration posterior is not maximal')

#    # as a measure of separation of the component in the trained model, sum up
#    # sym. KL divergence of all components and features
#    train_diff = 0
#    for j in range(gen.dist_nr):
#        for i1 in range(m.G):
#            for i2 in range(m.G):
#                train_diff += sym_kl_dist(m.components[i1][j], m.components[i2][j])

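    # KL-based distances between the generating and trained mixture densities,
    # computed in both directions since the measures are not symmetric.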
    mix_dist1 = mixtureKLdistance(gen, m)
    mix_dist2 = mixtureKLdistance(m, gen)

    max_dist1 = mixtureMaxKLdistance(gen, m)
    max_dist2 = mixtureMaxKLdistance(m, gen)

    # number of leaders in the full enumeration model
    nr_full_lead = 0
    for ll in m4.leaders:
        nr_full_lead += len(ll)

    match = matchModelStructures(gen, m)

    compred = checkComponentRedundancy(gen.leaders, gen.groups)
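    # The search variants should all recover the same posterior; compare the
    # printed string representations so that scores count as equal only when
    # they agree exactly.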
    if not (str(logp_top) == str(logp_full_fixed) == str(logp_full)):

        print '-----------------------------------------------------------------------'

        print 'Different:'
        print 'top:', logp_top
        print 'full fixed:', logp_full_fixed
        print 'full:', logp_full
        print 'bottom:', logp_bottom, '\n'

        explain = 0
        if str(compred) != '[]':
            print '*** redundant components', compred
            explain = 1
        if gen.pi.min() < 0.05:
            print '*** vanishing component in generating model'
            explain = 1
        if m.pi.min() < 0.05:
            print '*** vanishing component in trained model'
            explain = 1

        if explain == 0:
            print '*** UNEXPLAINED !'

        printModel(gen, 'generating model')
        printModel(m, 'trained model')
        #print 'Trained model diff (simplistic):',train_diff
        print 'D: Mixture distance gen/trained:', mix_dist1
        print 'D: Mixture distance trained/gen:', mix_dist2

        print 'D: Mixture Max-distance gen/trained:', max_dist1
        print 'D: Mixture Max-distance trained/gen:', max_dist2

        print '\nGenerating distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(gen.components[i1][j],
                                              gen.components[i2][j])
                print map(lambda x: '%.2f' % float(x),
                          kldists)  # kldists.min()

        print '\nTrained distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j],
                                              m.components[i2][j])
                print map(lambda x: '%.2f' % float(x),
                          kldists)  # kldists.min()

        print '\nTrained distances to generating:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j],
                                              gen.components[i2][j])
                print i1, '->', kldists.argmin(), map(
                    lambda x: '%.2f' % float(x), kldists)  # kldists.min()

        print '\n True structure:'
        print 'True model post:', mixture.get_loglikelihood(
            gen, data) + gen.prior.pdf(gen)
        #for j in range(m.dist_nr):
        #    print j,gen.leaders[j], gen.groups[j]
        printStructure(gen)

        print '\nTop down:'
        printStructure(m1)
        print 'Top down model post:', mixture.get_loglikelihood(
            m1, data) + m1.prior.pdf(m1)
        printModel(m1, 'top down model')

        print '\nFull enumeration Fixed Order:'
        printStructure(m2)
        print 'Full fixed order model post:', mixture.get_loglikelihood(
            m2, data) + m2.prior.pdf(m2)
        printModel(m2, 'full fixed model')

        print '\nBottom up:'
        printStructure(m3)
        print 'Bottom up model post:', mixture.get_loglikelihood(
            m3, data) + m3.prior.pdf(m3)
        printModel(m3, 'bottom up model')

        print '\nFull enumeration:'
        printStructure(m4)
        print 'Full enumeration model post:', mixture.get_loglikelihood(
            m4, data) + m4.prior.pdf(m4)
        printModel(m4, 'full enumeration model')

        print '-----------------------------------------------------------------------'

    # redundant components and not fully merged
    elif str(compred) != '[]' and nr_full_lead > m4.p and match != 1:
        print '-----------------------------------------------------------------------'
        print 'Same but redundant components:', compred

        printModel(gen, 'generating model')
        printModel(m, 'trained model')
        #print 'Trained model diff:',train_diff
        print 'S: Mixture distance gen/trained:', mix_dist1
        print 'S: Mixture distance trained/gen:', mix_dist2

        print 'S: Mixture Max-distance gen/trained:', max_dist1
        print 'S: Mixture Max-distance trained/gen:', max_dist2

        print '\nGenerating distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(gen.components[i1][j],
                                              gen.components[i2][j])
                print i1, ':', map(lambda x: '%.2f' % float(x),
                                   kldists)  # kldists.min()

        print '\nTrained distances to self:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j
            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j],
                                              m.components[i2][j])
                print i1, ':', map(lambda x: '%.2f' % float(x),
                                   kldists)  # kldists.min()

        print '\nTrained distances to generating:'
        cmap = {}
        for j in range(gen.dist_nr):
            print 'feature:', j

            for i1 in range(m.G):
                kldists = np.zeros(m.G)
                for i2 in range(m.G):
                    kldists[i2] = sym_kl_dist(m.components[i1][j],
                                              gen.components[i2][j])
                print i1, '->', kldists.argmin(), map(
                    lambda x: '%.2f' % float(x), kldists)  # kldists.min()

        print '\n True structure:'
        print 'True model post:', mixture.get_loglikelihood(
            gen, data) + gen.prior.pdf(gen)
        #for j in range(m.dist_nr):
        #    print j,gen.leaders[j], gen.groups[j]
        printStructure(gen)

        print '\nTop down:'
        printStructure(m1)
        print 'Top down model post:', mixture.get_loglikelihood(
            m1, data) + m1.prior.pdf(m1)

        print '\nFull enumeration Fixed Order:'
        printStructure(m2)
        print 'Full fixed order model post:', mixture.get_loglikelihood(
            m2, data) + m2.prior.pdf(m2)

        print '\nBottom up:'
        printStructure(m3)
        print 'Bottom up model post:', mixture.get_loglikelihood(
            m3, data) + m3.prior.pdf(m3)

        print '\nFull enumeration:'
        printStructure(m4)
        print 'Full enumeration model post:', mixture.get_loglikelihood(
            m4, data) + m4.prior.pdf(m4)

        print '-----------------------------------------------------------------------'

#    else:
#        print '-----------------------------------------------------------------------'
#        print 'S: Mixture distance gen/trained:',mix_dist1
#        print 'S: Mixture distance trained/gen:',mix_dist2
#        print '-----------------------------------------------------------------------'

#    else:
#        print '** all equal.'

#    dtop = structureAccuracy(gen,m1)
#    dfull_fixed = structureAccuracy(gen,m2)
#    dfull = structureAccuracy(gen,m4)
#    dbottom = structureAccuracy(gen,m3)

    return np.array([logp_top, logp_full_fixed, logp_full, logp_bottom])