Example #1
0
if __name__ == "__main__":

    ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ### MPI map
    ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    # Use the SimpleMPI parallel map when available; otherwise fall back
    # to the serial built-in map and treat this single process as master.
    try:
        from SimpleMPI.MPI_map import MPI_map, is_master_process
    except ImportError:
        MPI_map = map
        is_master_process = lambda: True

    # Run `run` (defined elsewhere in this file) once per value; each value
    # is wrapped in a singleton list because MPI_map is handed an argument
    # list per call. 3 values x 100 repetitions = 300 jobs.
    allret = MPI_map(run, map(lambda x: [x], [0.01, 0.1, 1.0] * 100 ))

    # Only the master process aggregates and reports results.
    if is_master_process():

        # Merge the per-job best-sets into one global FiniteBestSet.
        allfbs = FiniteBestSet(max=True)
        allfbs.merge(allret)

        H = allfbs.get_all()

        # Re-score every surviving hypothesis at a fixed likelihood
        # temperature. NOTE(review): `data` is defined elsewhere in this
        # file (not visible here) -- confirm which data set is intended.
        for h in H:
            h.likelihood_temperature = 0.01 # on what set of data we want?
            h.compute_posterior(data)

        # show the *average* ll for each hypothesis
        for h in sorted(H, key=lambda h: h.posterior_score):
            print h.posterior_score, h.prior, h.likelihood, h.likelihood_temperature
            print h
Example #2
0
	"""
	# initialize the data
	data = generate_data(data_size)
	
	# starting hypothesis -- here this generates at random
	h0 = NumberExpression(G)
	
	hyps = FiniteBestSet(max=True, N=options.TOP_COUNT, key="posterior_score") 
	hyps.add( mh_sample(h0, data, options.STEPS, trace=False) )
	
	return hyps
	
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Main running

# Display the run's option settings once, from the master process only.
if is_master_process():
	display_option_summary(options)
	
# choose the appropriate map function
# One job per entry of DATA_AMOUNTS repeated CHAINS times; each value is
# wrapped in a singleton list because MPI_map is handed an argument list
# per call. `run` is defined earlier in this file.
allret = MPI_map(run, map(lambda x: [x], options.DATA_AMOUNTS * options.CHAINS)) 

# Handle all of the output
# Merge the per-job FiniteBestSets into one global best-set, then persist it.
allfs = FiniteBestSet(max=True)
allfs.merge(allret)
allfs.save(options.OUT_PATH) # save this in a file

## If we want to print the summary with the "large" data size (average posterior score computed empirically)
if options.LARGE_DATA_SIZE > 0 and is_master_process():
	
	#now evaluate on different amounts of data too:
	huge_data = generate_data(options.LARGE_DATA_SIZE)
Example #3
0
    from LOTlib.Inference.MetropolisHastings import mh_sample
    for h in lot_iter(mh_sample(h0, data, SAMPLES)):
        fbs.add(h, h.posterior_score)

    return fbs


## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### MPI map
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from SimpleMPI.MPI_map import MPI_map, is_master_process

allret = MPI_map(run, map(lambda x: [x], [0.01, 0.1, 1.0] * 100 ))

if is_master_process():

    allfbs = FiniteBestSet(max=True)
    allfbs.merge(allret)

    H = allfbs.get_all()

    for h in H:
        h.likelihood_temperature = 0.01 # on what set of data we want?
        h.compute_posterior(data)

    # show the *average* ll for each hypothesis
    for h in sorted(H, key=lambda h: h.posterior_score):
        print h.posterior_score, h.prior, h.likelihood, h.likelihood_temperature
        print h
Example #4
0
File: Search.py Project: sa-/LOTlib
    """
    # initialize the data
    data = generate_data(data_size)

    # starting hypothesis -- here this generates at random
    h0 = NumberExpression(grammar)

    hyps = FiniteBestSet(max=True, N=options.TOP_COUNT, key="posterior_score")
    hyps.add( mh_sample(h0, data, options.STEPS, trace=False) )

    return hyps

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Main running

# Display the run's option settings once, from the master process only.
if is_master_process():
    display_option_summary(options)

# choose the appropriate map function
# One job per entry of DATA_AMOUNTS repeated CHAINS times; each value is
# wrapped in a singleton list because MPI_map is handed an argument list
# per call. `run` is defined earlier in this file.
allret = MPI_map(run, map(lambda x: [x], options.DATA_AMOUNTS * options.CHAINS))

# Handle all of the output
# Merge the per-job FiniteBestSets into one global best-set.
allfs = FiniteBestSet(max=True)
allfs.merge(allret)

# Persist the merged best-set. A pickle stream is binary data, so the file
# must be opened in binary mode ('wb'), not text mode ('w') -- text mode
# corrupts the stream on platforms that translate newlines.
import pickle
with open(options.OUT_PATH, 'wb') as f:
    pickle.dump(allfs, f)

## If we want to print the summary with the "large" data size (average posterior score computed empirically)
if options.LARGE_DATA_SIZE > 0 and is_master_process():