Example #1
    def testMonteCarlo1(self):
        print("====== MonteCarlo 1 ===================")

        N = 100
        ran = numpy.random
        ran.seed(12345)
        noise = ran.standard_normal(N)
        x = numpy.arange(N, dtype=float) - 3
        nn = 0.1
        for k in range(5):
            y = noise * nn

            m = PolynomialModel(0)
            ftr = Fitter(x, m)
            par = ftr.fit(y)
            std = ftr.getStandardDeviations()
            chisq = ftr.chisq

            mc = MonteCarlo(x, m, ftr.covariance)
            mc.mcycles = 1000
            lmce = ftr.monteCarloError(monteCarlo=mc)

            print("noise  : ", fmt(nn),
                  "===========================================")
            print("params : ", fmt(par, format="%8.5f"))
            print("stdevs : ", fmt(std, format="%8.5f"))
            print("scale  : ", fmt(ftr.scale, format="%8.5f"), fmt(nn))
            print("chisq  : ", fmt(chisq, format="%8.5f"),
                  fmt(mc._eigenvalues, format="%8.5f"),
                  fmt(mc._eigenvectors, format="%8.5f"))
            print("covar  : ", fmt(ftr.covariance, format="%8.5f"))
            print("mcerr  : ", fmt(lmce[0], format="%8.5f"))
            self.assertTrue(abs(std[0] - lmce[0]) < 0.1 * std[0])
            self.assertTrue(par[0] < 0.05 * nn)
            nn *= 10
Example #2
def run_trial(planning_horizon):

    blocks_world_builder = BlocksWorldBuilder(blocks_world_size)
    ctrl = SimpleMonteCarloControl()
    planner = Planner(planning_horizon)
    mc = MonteCarlo(blocks_world_builder,
                    planner,
                    control=ctrl,
                    max_episode_length=blocks_world_size * 2,
                    planning_factor=0,
                    plan_on_empty_policy=True,
                    exploring_starts=True,
                    exploring_factor=0)

    mc.learn_policy(number_episodes=number_of_episodes,
                    show_progress_bar=True,
                    evaluate_return_ratio=False)

    data = pd.DataFrame({
        'episode': range(len(mc.returns)),
        #'return_ratio': mc.return_ratios,
        'observed_returns': mc.returns,
        #'optimal_returns': mc.optimal_returns
    })

    return data
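
A hypothetical driver for run_trial, assuming the module-level blocks_world_size and number_of_episodes globals this snippet relies on; it sweeps a few planning horizons and aggregates the returned frames (a sketch, not part of the original example):

import pandas as pd

blocks_world_size = 5        # assumed module-level config
number_of_episodes = 100     # assumed module-level config

# Run one trial per planning horizon and tag each frame with its horizon.
frames = [run_trial(h).assign(planning_horizon=h) for h in (1, 2, 4)]
results = pd.concat(frames, ignore_index=True)
print(results.groupby('planning_horizon')['observed_returns'].mean())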
Example #3
def test_accept_check_positive_energy_gap():
	temp = np.random.uniform(0, 100)
	mc = MonteCarlo(temp,1,np.array([1]))
	d0 = np.random.randint(0, 101, 100)
	d1 = np.random.randint(0, 101, 100)

	e0 = energy(d0)
	e1 = energy(d1)

	energyDiff = e1 - e0
	
	# we want an instance that will trigger the calculation of p0 and p1,
	# i.e. a positive energy gap

	while energyDiff < 0:
		d0 = np.random.randint(0, 101, 100)
		d1 = np.random.randint(0, 101, 100)

		e0 = energy(d0)
		e1 = energy(d1)

		energyDiff = e1 - e0

	p0 = calculateP0(energyDiff,temp)
	noP1 = p0+((1-p0)*0.1)
	yesP1 = p0-((1-p0)*0.1)

	mc.generateP1 = Mock(name="generateP1", return_value=yesP1)

	assert(mc.checkIfAcceptMove(d0,d1))

	mc.generateP1 = Mock(name="generateP1", return_value=noP1)

	assert(not mc.checkIfAcceptMove(d0,d1))	
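
A checkIfAcceptMove consistent with this test might look like the following method sketch (an assumed implementation, not the project's actual code; calculateP0 is presumed to return the Boltzmann factor exp(-dE/T), and self.temperature is a hypothetical attribute name):

	def checkIfAcceptMove(self, d0, d1):
		energyDiff = energy(d1) - energy(d0)
		if energyDiff < 0:
			return True  # downhill moves are always accepted
		p0 = calculateP0(energyDiff, self.temperature)
		# uphill moves pass only if the generated p1 does not exceed p0
		return self.generateP1() <= p0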
Example #4
    def monteCarloError(self, xdata=None, monteCarlo=None):
        """
        Calculates :math:`\sigma`-confidence regions on the model given some inputs.

        From the full covariance matrix (inverse of the Hessian) random
        samples are drawn, which are added to the parameters. With this new
        set of parameters the model is calculated. By default, this
        procedure is repeated 25 times.
        The standard deviation of the models is returned as the error bar.

        The calculation of the confidence region is delegated to the
        MonteCarlo class; tweaking of that class can be done outside
        BaseFitter.

        Parameters
        ----------
        xdata : array_like
            input data over which to calculate the error bars.
        monteCarlo : MonteCarlo
            a ready-made MonteCarlo class.

        """
        if xdata is None: xdata = self.xdata
        if monteCarlo is None:
            monteCarlo = MonteCarlo(xdata,
                                    self.model,
                                    self.covariance,
                                    index=self.fitIndex)

        return monteCarlo.getError(xdata)
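
The docstring's procedure can be sketched directly: draw parameter vectors from a multivariate normal with the fitted covariance, evaluate the model for each draw, and take the per-point standard deviation as the error bar (a minimal sketch with hypothetical names, not BaseFitter's actual implementation):

import numpy

def monte_carlo_error_sketch(xdata, model_func, params, covariance, samples=25):
    # Draw `samples` parameter vectors from N(params, covariance).
    draws = numpy.random.multivariate_normal(params, covariance, size=samples)
    # Evaluate the model once per drawn parameter set.
    results = numpy.array([model_func(xdata, p) for p in draws])
    # The spread of the realizations is the confidence band at each x.
    return results.std(axis=0)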
Example #5
    def testMonteCarlo3(self, doplot=False):
        print("====== MonteCarlo 3 ===================")

        N = 101
        x = numpy.arange(N, dtype=float) * 0.1
        ran = numpy.random
        ran.seed(1235)
        noise = ran.standard_normal(N)

        ym = x * x + 0.03 * x + 0.05
        y1 = ym + 10 * noise

        pm = PolynomialModel(2)

        ftr = Fitter(x, pm)

        pars1 = ftr.fit(y1)
        stdv1 = ftr.getStandardDeviations()
        print("parameters : ", pars1)
        print("std devs   : ", stdv1)
        print("chisquared : ", ftr.chisq)

        lmce = ftr.monteCarloError()
        chisq = ftr.chisq

        mce = MonteCarlo(x, pm, ftr.covariance)
        mce1 = mce.getError()
        assertAAE(lmce, mce1)

        yfit = pm.result(x)
        s2 = numpy.sum(numpy.square((yfit - ym) / lmce))
        print(s2, math.sqrt(s2 / N))

        integral = numpy.sum(yfit)
        s1 = 0
        s2 = 0
        k = 0
        for k in range(1, 100001):
            rv = mce.randomVariant(x)
            s1 += numpy.sum(rv)
            s2 += numpy.sum(numpy.square(rv - yfit))
            if k % 10000 == 0:
                print("%6d  %10.3f %10.3f %10.3f" %
                      (k, integral, s1 / k, math.sqrt(s2 / k)))

        ### TBC: don't know why the factor 1000 is there. ########
        print(abs(integral - s1 / k), math.sqrt(s2 / (k * 1000)))
        self.assertTrue(abs(integral - s1 / k) < math.sqrt(s2 / (k * 1000)))

        if doplot:
            pyplot.plot(x, y1, 'b.')

            pyplot.plot(x, ym, 'k-')
            pyplot.plot(x, yfit, 'g-')
            pyplot.plot(x, yfit + lmce, 'r-')
            pyplot.plot(x, yfit - lmce, 'r-')
            pyplot.show()
Example #6
def test_equal_probability():
    """ Check particles have equal probability of movement. """
    from numpy import array, sqrt, count_nonzero

    energy = MagicMock()

    density = array([1, 0, 99])
    mc = MonteCarlo(energy, density)
    changes_at_zero = [(density - mc.change_density(density))[0] != 0
                       for i in range(10000)]
    assert count_nonzero(changes_at_zero) == approx(
        0.01 * len(changes_at_zero), 0.5 * sqrt(len(changes_at_zero)))
Example #7
    def run( self, runid ):
        ''' runid is an identification for the model to dump '''
        self.__fragidx_pose.clear()
        self.__fragidx_pose.initialization( self._scorefxn.get_density_score_dict(), self._initialization ) # default initialize by random
        #print self.__fragidx_pose.show_state( "initialization" )

        mc = MonteCarlo( self._scorefxn, self._temperature )
        mc.apply( self.__fragidx_pose, self._steps ) # guard against a silly bug: at the very first state SCORE==0, and you would never go lower than that during high-temperature sampling
        self._lowest_score_pose = self.__fragidx_pose.clone()

        tracker = TrajectoryTracker( runid )
        tag = "model_" + str( runid )

        while self._temperature >= 0: # this condition never becomes false; the loop exits via the break below
            if self._quench: 
                self.recover_low() # retrieve the lowest-score pose from previous runs

            mc.apply( self.__fragidx_pose, self._steps )

            residual_pose = self._scorefxn.residualize_pose() # residual_pose will be filled with
            #print residual_pose.show_state( tag + "_" + str( self._temperature ) )

            self.set_annealing_temperature() # update self._temperature
            if self._temperature == mc.get_temperature(): break

            mc.set_temperature( self._temperature )

            if self._record_trajectory: tracker.save( self._temperature, self.__fragidx_pose )

        tracker.save( self._temperature, self._scorefxn.residualize_pose() )
        tracker.dump_pickle( tag )
        residual_pose.show_state( tag + "_final", True ) # True for verbose showing all residues states
Example #8
def drawMonteCarlo():
    iterations = [10, 100, 1000, 10000, 100000, 500000, 1000000]
    for iteration in iterations:
        print('Creating Monte Carlo Agent...')
        monti = MonteCarlo(100)
        print('Monte Carlo created')
        print('Training Monte Carlo for', iteration, 'iterations.')
        monti.train(iteration)
        print('Training completed, plotting image')
        figure = plt.figure('Monte' + str(iteration))
        b = figure.add_subplot(111, projection='3d')
        resultfig = plotMonte(b, monti)
        figure.savefig('MonteCarlo' + str(iteration) + '.png')
        plt.show()
Example #9
def main():
    begin = datetime.utcnow()
    timelimit = timedelta(seconds=60)
    b = Board()
    mc = MonteCarlo(b, seconds=4)
    player_one_wins = 0
    player_two_wins = 0
    draws = 0
    while (datetime.utcnow() - begin < timelimit):
        winner, _ = self_play(mc, b)
        if winner == 1:
            player_one_wins += 1
        elif winner == -1:
            player_two_wins += 1
        elif winner == 0:
            draws += 1
        else:
            print("Error, unknown winner returned:", winner)
    total_played = player_one_wins + player_two_wins + draws
    print("Total games played:", total_played)
    print("Player one wins:", player_one_wins, " or ",
          (player_one_wins / total_played) * 100, "%")
    print("Player two wins:", player_two_wins, " or ",
          (player_two_wins / total_played) * 100, "%")
    print("Draws:", draws, " or ", (draws / total_played) * 100, "%")
Example #10
def main():
    print("Would you like to go 1st or 2nd?\n    Go 1st: 1\n    Go 2nd: 2")
    if int(input()) == 2:
        players = {-1: "Human", 1: 'AI'}
    else:
        players = {1: "Human", -1: 'AI'}
    board = Board()
    mc = MonteCarlo(board, seconds=3)
    game_history = []
    game_state = board.start()
    game_history.append(game_state)
    mc.update(game_state)
    legals = board.legal_plays(game_history)
    winner = board.winner(game_history)
    board.show(game_history[-1])
    while legals and winner == 0:
        current_player = board.current_player(game_state)
        #print(current_player)
        if players[current_player] == 'Human':
            print("Please enter the square you'd like to play: ")
            pos = int(input())
            game_state = board.next_state(game_state, (pos, current_player))
        elif players[board.current_player(game_state)] == 'AI':
            print("AI is thinking....")
            game_state = board.next_state(game_state, mc.get_play())
        mc.update(game_state)
        game_history.append(game_state)
        legals = board.legal_plays([game_state])
        winner = board.winner([game_state])
        board.show(game_history[-1])

    print("The game is over!\n Plauer: ", winner, "has won")
Example #11
class MonteCarloTest(unittest.TestCase):

	def setUp(self):
		self.foo = MonteCarlo()

	def test_inCircle(self):
		self.assertTrue(self.foo.inCircle(0,0))

	def test_pyPi(self):
		self.foo.calcMonteCarlo(1000000)
		pi = self.foo.circleArea / self.foo.squareArea
		self.assertGreater(pi, 2)
		self.assertLess(pi, 4)

	def test_linPi(self):
		self.foo.calcMonteCarloLinGen(1000000)
		pi = self.foo.circleArea / self.foo.squareArea
		self.assertGreater(pi, 2)
		self.assertLess(pi, 4)
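
A minimal MonteCarlo class that would satisfy this test might look like the sketch below (assumed internals: sample points in the square [-1, 1]^2 and scale the hit count so that circleArea / squareArea approximates pi; the calcMonteCarloLinGen variant, presumably using a different random generator, is omitted):

import random

class MonteCarlo:
	def __init__(self):
		self.circleArea = 0.0
		self.squareArea = 0.0

	def inCircle(self, x, y):
		# (0, 0) lies inside the unit circle, as test_inCircle expects.
		return x * x + y * y <= 1.0

	def calcMonteCarlo(self, n):
		inside = 0
		for _ in range(n):
			x, y = random.uniform(-1, 1), random.uniform(-1, 1)
			if self.inCircle(x, y):
				inside += 1
		# Scale so circleArea / squareArea estimates pi (within (2, 4)).
		self.circleArea = 4.0 * inside
		self.squareArea = float(n)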
Example #12
def drawForAllLambdas():
    montecarlo = MonteCarlo(100)
    print('Training Monte Carlo')
    montecarlo.train(500000)
    print('Training of Monte Carlo Completed')
    lambdas = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    squareMean = []
    numberElements = montecarlo.Q.shape[0] * montecarlo.Q.shape[1] * 2
    for lambdaValue in lambdas:
        sarsa = SARSA(100, lambdaValue)
        print('Training SARSA', lambdaValue)
        sarsa.train(1000)
        print('Training of SARSA Completed')
        squareMeanCalc = np.sum(
            np.square(sarsa.Q - montecarlo.Q)) / float(numberElements)
        squareMean.append(squareMeanCalc)
    fig = plt.figure("SARSA")
    surf = plt.plot(lambdas[1:10], squareMean[1:10])
    fig.savefig('lambdaALL.png')
    plt.show()
Example #13
def drawForLambdaZero():
    montecarlo = MonteCarlo(100)
    print('Training Monte Carlo')
    montecarlo.train(500000)
    print('Training of Monte Carlo Completed')
    lambdaValue = 0
    learningRate = []
    learningRateIndex = []
    sarsa = SARSA(100, lambdaValue)
    print('Training SARSA and plotting graph')
    for i in range(1000):
        learningRateIndex.append(i)
        sarsa.train(1)
        squareMean = np.sum(np.square(sarsa.Q - montecarlo.Q)) / float(1000)
        learningRate.append(squareMean)

    fig = plt.figure("SARSAZERO")
    surf = plt.plot(learningRateIndex, learningRate)
    fig.savefig('lambdaZero.png')
    plt.show()
Example #14
def test_accept_change():
    """ Check that move is accepted if second energy is lower """
    from numpy import sqrt, count_nonzero, exp

    energy = MagicMock()
    mc = MonteCarlo(energy, [1, 1, 1], temperature=100.0)
    # Should always be true.
    # But do more than one draw,
    # in case randomness incorrectly crept into
    # implementation
    for i in range(10):
        assert mc.accept_change(0.5, 0.4)
        assert mc.accept_change(0.5, 0.5)

    # This should be accepted only part of the time,
    # depending on exponential distribution
    prior, successor = 0.4, 0.5
    accepted = [mc.accept_change(prior, successor) for i in range(10000)]
    assert count_nonzero(accepted) / float(len(accepted)) == approx(
        exp(-(successor - prior) / mc.temperature), 3e0 / sqrt(len(accepted)))
Example #15
def ReachPrecisionMonteCarlo(eps, n):
    workbook = xlsxwriter.Workbook('ExcelFileRoot')
    worksheet = workbook.add_worksheet('Sheet1')
    listResult = []
    for k in range(0, n):
        i = 1
        while (abs(math.pi - MonteCarlo(i)) > eps):
            i += 1
        worksheet.write_number(k, 2, i)
        listResult.append(i)  # record the count so the returned list is populated
    workbook.close()
    return listResult
Example #16
def main( args ):

    wts = Weights( args.density_score_wt, args.overlap_score_wt, args.closab_score_wt, args.clash_score_wt )
    scorefxn = ScoreFunction( args.density_scorefile, args.overlap_scorefile, args.nonoverlap_scorefile, wts, args.null_frag_score )

    for model in range( args.nstruct ):

        pose = Pose() # empty pose
        pose.initialization( scorefxn._density_score_dict, args.initialization ) # default initialize by random
        pose.show_state( "initialization" )

        temp = args.starting_temperature
        steps = args.steps_per_temp
        mc = MonteCarlo( scorefxn, temp )

        trajectory_tracker_dict = {}

        while temp>=0 or anneal_temp==0: # this condition never becomes false; the loop exits via the break below
            run_tag = "model_" + str( model ) + "_temp_" + str( temp )

            mc.apply( pose, steps )
            pose.show_state( run_tag )
            #pose.dump_pickle( run_tag + ".pickle" )
            trajectory_tracker_dict[ run_tag ] = pose

            anneal_temp = round( temp*args.cooling_rate, 1 )
            temp -= anneal_temp

            if temp == mc.temperature(): break
            mc.set_temperature( temp )

        pickle.dump( trajectory_tracker_dict, open( "model_" + str( model ) + ".pickle", "w" ) )
        pose.show_state( "model_" + str( model ) + "_end", True ) # True for verbose
Example #17
def main( args ):
    print( print_args( args ) )

    wts = Weights( args.density_score_wt, args.overlap_score_wt, args.closab_score_wt, args.clash_score_wt )
    scorefxn = ScoreFunction( args.density_scorefile, args.overlap_scorefile, args.nonoverlap_scorefile, wts, args.null_frag_score )
    pose = Pose() # empty pose
    pose.initialization( scorefxn._density_score_dict ) # default initialize by random

    temp = args.temperature
    mc = MonteCarlo( scorefxn, temp )

    for each_round in range( 1, args.round+1 ):

        mc.apply( pose, args.steps )
        pose.show_state( temp )

        temp -= round( temp*0.1, 2 )
        if temp <= 0: break

        mc.set_temperature( temp )

    pose.dump_pickle()
Example #18
    def testMonteCarlo2(self, doplot=False):
        print("====== MonteCarlo 2 ===================")

        x = numpy.arange(7, dtype=float) - 3
        y = numpy.asarray([-1, -1, -1, 0, 1, 1, 1], dtype=float)
        m = PolynomialModel(1)
        ftr = Fitter(x, m)
        par = ftr.fit(y)
        std = ftr.getStandardDeviations()
        yfit = m.result(x)
        chisq = ftr.chisq
        hes = ftr.hessian
        #        mc = MonteCarlo( x, m, chisq, hessian=hes )
        mc = MonteCarlo(x, m, ftr.covariance)
        mc.mcycles = 1000
        lmce = ftr.monteCarloError(monteCarlo=mc)

        print("params : ", par)
        print("stdevs : ", std)
        print("scale  : ", ftr.getScale())
        print("chisq  : ", chisq)
        print("evals  : ", mc._eigenvalues)
        print(mc._eigenvectors)
        print("hessi  :\n", ftr.hessian)
        print("covar  :\n", ftr.covariance)
        print("mcerr  : ", lmce)

        numpy.testing.assert_array_almost_equal(
            par, numpy.asarray([0.0, 0.42857142857142855]))
        #        numpy.testing.assert_array_almost_equal( std, numpy.asarray([0.1564921592871903,0.07824607964359515]) )
        self.assertAlmostEqual(chisq, 0.857142857143)

        if doplot:
            pyplot.plot(x, y, 'k*')
            pyplot.plot(x, yfit, 'g-')
            pyplot.plot(x, yfit + lmce, 'r-')
            pyplot.plot(x, yfit - lmce, 'r-')
            pyplot.show()
Example #19
def test_move_particle_one_over():
    """ Check density is change by a particle hopping left or right. """
    from numpy import nonzero, multiply
    from numpy.random import randint

    energy = MagicMock()

    for i in range(100):
        # Do this n times, to avoid
        # issues with random numbers
        # Create density

        density = randint(50, size=randint(2, 6))
        mc = MonteCarlo(energy, density)
        # Change it
        new_density = mc.change_density(density)

        # Make sure any movement is by one
        indices = nonzero(density - new_density)[0]
        assert len(indices) == 2, "densities differ in two places"
        assert (multiply.reduce(
            (density -
             new_density)[indices]) == -1), "densities differ by + and - 1"
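
A change_density consistent with both this test (one particle hops a single cell) and test_equal_probability in Example #6 (every particle equally likely to move) could look like this sketch (an assumed implementation, not the library's code):

import numpy as np

def change_density(density):
    """Move one particle a single cell to the left or right."""
    density = np.array(density, copy=True)
    # Pick the source cell with probability proportional to its occupancy,
    # so each individual particle is equally likely to be moved.
    source = np.random.choice(len(density), p=density / density.sum())
    # Pick a direction; bounce off the edges so the move stays in bounds.
    destination = source + np.random.choice([-1, 1])
    if destination < 0 or destination >= len(density):
        destination = 2 * source - destination
    density[source] -= 1
    density[destination] += 1
    return density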
Example #20
def test_accept_check_negative_energy_gap():
	temp = np.random.uniform(0, 100)
	mc = MonteCarlo(temp,1,np.array([1]))
	d0 = np.random.randint(0, 101, 100)
	d1 = np.random.randint(0, 101, 100)

	e0 = energy(d0)
	e1 = energy(d1)

	energyDiff = e1 - e0
	
	# we want an instance where the move is accepted without computing p0 and p1,
	# i.e. a negative energy gap

	while energyDiff >= 0:
		d0 = np.random.randint(0, 101, 100)
		d1 = np.random.randint(0, 101, 100)

		e0 = energy(d0)
		e1 = energy(d1)

		energyDiff = e1 - e0

	assert(mc.checkIfAcceptMove(d0,d1))
Example #21
    def test_02_MonteCarlo(self):
        """
        Varying the time step size
        """
        Asset = 100.0
        Strike = 100.0
        InterestRate = 0.05
        Volatility = 0.2
        Expiration = 1.0
        NumberAssetStep = 100
        TimeStep = Expiration / NumberAssetStep
        NumbreOfSimulation = 10000

        listOfValuesList = []

        for i in range(0, 10):
            """
            Price with Black and Scholes and Monte Carlo and print the differences
            """
            BS = BlackScholes(Asset, Strike, InterestRate, Volatility,
                              Expiration)
            BS.price()

            MC = MonteCarlo(Asset, Strike, InterestRate, Volatility,
                            Expiration, TimeStep, NumberAssetStep,
                            NumbreOfSimulation)
            MC.price()

            err = BS.getPrice() - MC.getPrice()
            self.assertGreater(BS.getPrice(), 0.0)

            valuesList = [
                Asset, Strike, InterestRate, Volatility, Expiration, TimeStep,
                NumberAssetStep, NumbreOfSimulation,
                BS.getPrice(),
                MC.getPrice(), err,
                math.fabs(err / BS.getPrice())
            ]

            listOfValuesList.append(valuesList)

            NumberAssetStep += 100
            TimeStep = Expiration / NumberAssetStep

        headerList = [
            'Asset', 'Strike', 'IntRate', 'Vol', 'Expiration', 'TimeStep',
            'NumberAssetStep', 'NumbreOfSimulation', 'BSPrice', 'MCPrice',
            'ErrorWithBS', 'ErrWithBSPer'
        ]

        writeToFile("binaryCallMCPriceTest2.csv", headerList)

        for valuesList in listOfValuesList:
            writeToFile("binaryCallMCPriceTest2.csv", valuesList)
Example #22
def test_main_algorithm():
    import numpy as np
    from numpy import testing
    from unittest.mock import Mock

    density = [1, 1, 1, 1, 1]
    energy = MagicMock()
    mc = MonteCarlo(energy, density, itermax=5)

    acceptance = [True, True, True, True, True]
    mc.accept_change = Mock(side_effect=acceptance)
    mc.random_agent = Mock(side_effect=[0, 1, 2, 3, 4])
    mc.random_direction = Mock(side_effect=[1, 1, 1, 1, -1])
    np.testing.assert_equal(mc.step()[1], [0, 1, 1, 2, 1])
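
To see why [0, 1, 1, 2, 1] is expected: starting from [1, 1, 1, 1, 1], agents 0 through 3 each hop one cell to the right, giving [0, 2, 1, 1, 1], then [0, 1, 2, 1, 1], [0, 1, 1, 2, 1], and [0, 1, 1, 1, 2]; agent 4 then hops left, yielding [0, 1, 1, 2, 1]. Every move lands because accept_change is mocked to return True for all five iterations.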
Example #23
def test_input_sanity():
    """ Check incorrect input do fail """
    energy = MagicMock()

    with raises(NotImplementedError) as exception:
        MonteCarlo(sum, [1, 1, 1], 0e0)
    with raises(ValueError) as exception:
        MonteCarlo(energy, [1, 1, 1], temperature=-1e0)

    with raises(TypeError) as exception:
        MonteCarlo(energy, [1.0, 2, 3])
    with raises(ValueError) as exception:
        MonteCarlo(energy, [-1, 2, 3])
    with raises(ValueError) as exception:
        MonteCarlo(energy, [[1, 2, 3], [3, 4, 5]])
    with raises(ValueError) as exception:
        MonteCarlo(energy, [3])
    with raises(ValueError) as exception:
        MonteCarlo(energy, [0, 0])
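
A constructor validation sketch that would satisfy these assertions (hypothetical internals; each check mirrors one raise above, and attributing the NotImplementedError to the zero temperature in the first call's third positional argument is an assumption):

import numpy as np

def __init__(self, energy, density, temperature=1e0, itermax=100):
    # Hypothetical validation mirroring test_input_sanity, check by check.
    if temperature == 0:
        raise NotImplementedError("zero temperature is not implemented")
    if temperature < 0:
        raise ValueError("temperature must be positive")
    density = np.array(density)
    if density.dtype.kind != 'i':
        raise TypeError("density must be an array of integers")
    if density.ndim != 1:
        raise ValueError("density must be one-dimensional")
    if np.any(density < 0):
        raise ValueError("cell occupancies must be non-negative")
    if len(density) < 2:
        raise ValueError("density must have at least two cells")
    if density.sum() == 0:
        raise ValueError("density must contain at least one particle")
    self.energy, self.density = energy, density
    self.temperature, self.itermax = temperature, itermax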
Example #24
    def test_next_state(self):

        self.game.board.reset_test_env()

        #self.game.board._cells[1][2].set_empty()

        initial_board = deepcopy(self.game.board)

        this_board = self.game.board

        #w_piece = self._cells[1][3].set_holding(Piece(self._players[1]))

        avaiable_moves_p1 = self.game.board.avaiable_moves(
            this_board._players[0], flat=True)

        moves_states = []
        for legal_play in avaiable_moves_p1:
            #next_play_board = MonteCarlo.next_state(this_board, legal_play)
            #self.assertFalse(next_play_board == initial_board, "Board of next play are the same")
            #pprint(legal_play[1].pos)
            if (legal_play[1].pos == [1, 4]):
                #print("Player 0 number of cells : %d" % len(this_board.get_player_cells(next_play_board._players[0])))
                #print("Player 1 number of cells : %d" % len(this_board.get_player_cells(next_play_board._players[1])))

                print("BEFORE BOARD")
                this_board.print()
                print("After Board")

                self.assertFalse(
                    MonteCarlo.next_state(this_board,
                                          legal_play) == this_board,
                    "Board of next play are the same")

                print("Found equal 1,4")
                print("Init board value", end=". ")
                pprint(board_value(initial_board))
                print(", 1,4 move board val", end=". ")
Example #25
from MonteCarlo import MonteCarlo
from TDLambda import TDLambda
from LinFuncApprox import LinFuncApprox
import matplotlib.pyplot as plt
from plotQValues import plotQValues
'''Monte Carlo'''
mc = MonteCarlo()
q_mc, n_mc = mc.run_episodes(10000)
# plotQValues(q_mc)
'''TD Lambda'''
# td = TDLambda(0.5)
# td.run_episodes(100000)
# q_td = td.q_values()
# plotQValues(q_td)
#
# mse = []
# lmbda = []
# for i in range(0, 10):
#     lmbda.append(i / 10.)
#     td = TDLambda(i / 10.)
#     q_td, n_td = td.run_episodes(10000)
#
#     error = (q_td - q_mc) ** 2
#     mse.append(sum(sum(sum(error * 1./ (2 * 21 * 10)))))
#
# plt.plot(lmbda, mse)
#
# plt.show()
'''Linear Function Approximation'''
# lin = LinFuncApprox()
# lin.run_episodes(10000)
Example #26
# grab map information
mission_xml = BeautifulSoup(env.params['mission_xml'], features="xml")
map_spec = mission_xml.find('specification')
placement = mission_xml.find('Placement')
map_dimension = [
    int(map_spec.contents[1].text),
    int(map_spec.contents[2].text),
    int(map_spec.contents[3].text)
]
mission_available_moves = env.params['comp_all_commands']

num_episodes = 300
gamma = [1, .6, .3]
alpha = [1, .6, .3]
max_simulation_time = 120

# Input learning method
# MC - monte carlo, Q - Q learning
algorithm = 'Q'

for g in gamma:
    for a in alpha:
        if algorithm == 'MC':
            # instantiate an Agent object
            mc = MonteCarlo(mission_name, env, num_episodes, g,
                            max_simulation_time, a)
            mc.mc_prediction(filename='', iteration_number=0)
        elif algorithm == 'Q':
            # instantiate an Agent object
            q = Q(mission_name, env, num_episodes, g, a, max_simulation_time)
            q.q_prediction()
Example #27
    mc.update(game_state)
    mc.get_play()


def self_play(mc, b):
    game_history = []
    game_state = b.start()
    game_history.append(game_state)
    #print(b.start())
    mc.update(game_state)
    legals = b.legal_plays([game_state])
    winner = b.winner([game_state])
    while legals and winner == 0:
        game_state = b.next_state(game_state, mc.get_play())
        mc.update(game_state)
        game_history.append(game_state)
        legals = b.legal_plays([game_state])
        winner = b.winner([game_state])

    return winner, game_history


if __name__ == '__main__':
    b = Board()
    mc = MonteCarlo(b, seconds=0.3)
    winner, hist = self_play(mc, b)
    for state in hist:
        print("")
        Board().show(state)
    print("\nWinner: ", winner)
Example #28
 def test_add(self):
     # test that 1 + 1 = 2
     self.assertEqual(MonteCarlo.add(1, 1), 2)
Example #29
 def test_subtract(self):
     # test that 1 - 1 = 0
     self.assertEqual(MonteCarlo.subtract(1, 1), 0)
Example #30
print(female_to_male_reject)

female_to_male_accept_prob = (female_to_male_accept /
                              (female_to_male_accept + female_to_male_reject))
female_to_male_reject_prob = (female_to_male_reject /
                              (female_to_male_accept + female_to_male_reject))
# no_change_prob = (no_change/X.shape[0])

print("The probability of female_to_male_accept_prob = ",
      female_to_male_accept_prob)
print("The probability of female_to_male_reject_prob = ",
      female_to_male_reject_prob)
sys.stdout = orig_stdout
f.close()
##########################
"""
MonteCarlo()

"""

# plt.hist(X['applicant_sex'])
# sns.catplot(x=[1,2,3,4], y=X.groupby("applicant_sex").count().accden, data=X)
# plt.show()

# plt.hist(need)
# plt.show()
# plt.hist(reason)
# plt.show()

# tune_model(X,y,n_it = 50,models = ['RandomForest','xgb','Logistic'])
Example #31
 def monteCarloAIPlay(self):
     mcObj = MonteCarlo(self.gameState, self.name)
     mcObj.update(self.gameState.cardsPlayed)
     card = mcObj.getPlay()
     return card
Example #32
File: Player.py Project: GonVas/morelli
 def __init__(self, board, color, turn_time, dificulty=0):
     super().__init__(color)
     self.dificulty = dificulty
     self.ignore_mouse = True
     self.turn_time = turn_time
     self.monte = MonteCarlo(board, self, self.turn_time)
Example #33
def main2():
    b = Board()
    mc = MonteCarlo(b, seconds=20)
    game_state = b.start()
    mc.update(game_state)
    mc.get_play()
Example #34
step_size_parameters = [1, 0.8, 0.3, 0.03]
""" SETUP EXPERIMENT """

experiments = []

for _ in range(number_of_trials):

    # Control case: Monte Carlo control as in the bachelor's project, without planning.

    blocks_world_builder = BlocksWorldBuilder(blocks_world_size)
    planner = Planner(planning_horizon)
    ctrl = SimpleMonteCarloControl()
    mc = MonteCarlo(blocks_world_builder,
                    planner,
                    control=ctrl,
                    max_episode_length=blocks_world_size * 2,
                    planning_factor=0,
                    plan_on_empty_policy=True,
                    exploring_starts=True,
                    exploring_factor=0)

    experiments.append(('Mean-based', None, mc))

for step_size_parameter in step_size_parameters * number_of_trials:

    # Other cases: Gradient-based agents with different step size parameter values

    blocks_world_builder = BlocksWorldBuilder(blocks_world_size)
    planner = Planner(planning_horizon)
    ctrl = SgdMonteCarloControl(step_size_parameter)
    mc = MonteCarlo(blocks_world_builder,
                    planner,
Example #35
File: Player.py Project: GonVas/morelli
 def move(self, events, pygame, game):
     print('AI THINKING')
     #self.monte.update(deepcopy(game.board))
     #play = self.monte.get_play()
     play = MonteCarlo.best_move(game.board)
     game.board.move(play[0], play[1], destructive=True)
Example #36
from MonteCarlo import MonteCarlo
import numpy as np

i = np.zeros(100)
i[50] = 50

mc = MonteCarlo(1,1000,i)
mc.runSimulation()
Example #37
import os
import sys

# Make sure the path of the framework is included in the import path
sys.path.insert(
    0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

from tests import test_policy
from MonteCarlo import MonteCarlo
from mdp import BlocksWorldBuilder
from control import SimpleMonteCarloControl, SgdMonteCarloControl
from planner import Planner

from matplotlib import pyplot as plt

mdp_builder = BlocksWorldBuilder(blocks_world_size=7)
planner = Planner(planning_horizon=5)
ctrl = SimpleMonteCarloControl()
mc = MonteCarlo(mdp_builder,
                planner,
                control=ctrl,
                max_episode_length=14,
                planning_factor=0,
                plan_on_empty_policy=True,
                exploring_starts=True,
                exploring_factor=0.0)
learned_policy = mc.learn_policy(number_episodes=150, show_progress_bar=True)