Example #1
def main():
    quit = -1
    while quit < 0:
        print "-" * 30 + " Project 1 " + "-" * 30
        # print "Choose:"
        print "\n\n For a: \n  1: Graph Figure 7.16 in [GT] page 364 \n Default values (from = 'BWI', to = ['SFO','LAX']) \n\n For b: \n  2: MC with (3,2) \n  3: MC with (4,3) \n\n To Quit: \n  4: quit\n\n"
        choice = input('Enter your choise: ')
        if choice == 4:
            quit = 1
        elif choice == 3:
            mc.main(4, 3)
        elif choice == 2:
            mc.main(3, 2)
        elif choice == 1:
            gr.main()
Example #2
def rollout_MC_per_run(env, runtime, runtimes, episodes, target, gamma, Lambda,
                       alpha, beta):
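    # One independent run: return the final expected-return and return-variance
    # estimates together with the normalized visit counts (the empirical
    # stationary distribution).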
    print('rolling out %d of %d for MC' % (runtime + 1, runtimes))
    expected_return_trace, variance_of_return_trace, return_counts = MC(
        env, episodes, target, target, None, gamma)
    stationary_dist = return_counts / np.sum(return_counts)
    return (expected_return_trace[-1], variance_of_return_trace[-1],
            stationary_dist)
Example #3
def Encrypt():
    try:
        mensaje = "A file with name credentials.txt\n" + " was generated "

        fileD = tf.askdirectory()
        MC.AnonNews(fileD)
        msg.showinfo("Encrypt", "Directory Encrypted")
        msg.showinfo("Attention", mensaje)
    except Exception:
        msg.showerror("Encrypt", "An error occurred in the encrypt process")
Example #4
def randomAgent(agent):  # the random agent playing the game
    win = False
    actionSet = set(range(w * h))
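    # Fire at uniformly random untried cells until checkWin reports every ship sunk.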
    while not win:
        action = random.choice(tuple(actionSet))
        y = action // w  # row index (actions are numbered row-major over the w-by-h board)
        x = action % w  # column index
        if not MC.checkHit([y, x], agent.ships, agent.enemyBoard):
            agent.enemyBoard[y][x] = 1
        win = checkWin(agent.ships, agent.enemyBoard)
        actionSet.remove(action)
Example #5
def DesEncrypt():
    try:
        msg.showinfo("Key", "Select your key for decryption")
        file_key = tf.askopenfilename()
        with open(file_key, 'rb') as file:
            key = file.read()
        msg.showinfo("Directory", "Select directory to decrypt")
        fileD = tf.askdirectory()
        MC.AnonNewsD(fileD, key)
        msg.showinfo("Encryption", "Decrypted correctly")
    except Exception:
        msg.showerror("Encryption", "Error in decrypt process")
Example #6
def run():

    # Select the type of simulation
    take_0 = int(input('Please select the type of run | 1- MD 2- MC: '))
    if take_0 == 1:
        # Select the integrator
        take = int(input(
            'Please select the integrator | 1- Velocity Verlet 2- Leap Frog: '))
        # Select the ensemble
        if take == 1:
            take_2 = int(input('Please select the ensemble | 1- NVE 2- NVT: '))
            if take_2 == 1:
                Vel_verlet_NVE.Run_Vel_ver(1.0, 1.0, 1.0, 1.0, 0.001)
            elif take_2 == 2:
                take_3 = input(
                    'Please input the average temperature you got from NVE, or a temperature of your choice (in dimensionless units as per the L-J phase diagram): '
                )
                t_nvt_vv = float(take_3)
                Vel_verlet_NVT.Run_Vel_ver(1.0, 1.0, 1.0, 1.0, 0.001, 0.001,
                                           t_nvt_vv)
            else:
                print('Please select a valid option. Run the program again')
                exit(1)
        elif take == 2:
            take_4 = int(input('Please select the ensemble | 1- NVE 2- NVT: '))
            if take_4 == 1:
                Leap_Frog_NVE.Run_Leap_Frog(1.0, 1.0, 1.0, 1.0, 0.001)
            elif take_4 == 2:
                take_5 = input(
                    'Please input the average temperature you got from NVE, or a temperature of your choice (in dimensionless units as per the L-J phase diagram): '
                )
                t_nvt_lf = float(take_5)
                Leap_Frog_NVT.Run_Leap_Frog(1.0, 1.0, 1.0, 1.0, 0.001,
                                            t_nvt_lf, 0.001)
            else:
                print('Please select a valid option. Run the program again')
                exit(1)
        else:
            print('Please select a valid option. Run the program again')
            exit(1)
    elif take_0 == 2:
        MC.Run_MC_Metropolis(1.0, 1.0, 1.0, 1.0, 0.001)
    else:
        print('Please select a valid option. Run the program again')
        exit(1)
Example #7
    def main(self):
        # set hyperparameters
        theta = [0.01 for _ in range(self.dim + 1)]  # initialize (dim + 1) deltas to the same value
        threshold = 0.0001
        max_iter = 50

        pairs = []  #generate a (self.dim^2) list to hold all possible pairs
        for i in range(self.dim):
            for j in range(self.dim):
                pairs.append([i, j])

        n = MC.MC()
        f, p = n.rtm(self.data)

        log_likelihood = 0
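        # EM-style loop: update the parameters, recompute the log likelihood,
        # and stop once the gain falls below threshold (or after max_iter passes).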
        for k in range(max_iter):
            theta = self.update(theta, pairs, f)

            # compute new log likelihood
            new_log_likelihood = 0
            for i in range(self.dim):
                for j in range(len(pairs)):
                    # for lambda1
                    i_1, i_2 = pairs[j][0], pairs[j][1]
                    row = i_1 + i_2 * self.dim
                    denominator = (self.lambda1 * self.Q[i_1][i] +
                                   self.lambda2 * self.Q[i_2][i])
                    if denominator != 0:
                        new_log_likelihood += f[row][i] * math.log(denominator)
            # check whether log likelihood converges
            if log_likelihood == 0:
                log_likelihood = new_log_likelihood
            elif new_log_likelihood - log_likelihood < threshold:
                #print(new_log_likelihood - log_likelihood, "gives log_likelihood:", log_likelihood)
                return new_log_likelihood
            else:
                log_likelihood = new_log_likelihood
        print()
        print("final:", self.lambda1, self.lambda2)
        print("final:", self.Q)
        print("log_likelihood:", log_likelihood)
        return log_likelihood
Example #8
steps = params['Monte-Carlo']['Number_of_steps']

positions, energies = data.load_pos(
    dataset=params['dataset'], limit=steps
)

random_int = randint(0, steps - 1)
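# Index of a randomly chosen frame of the loaded data, used below as the
# starting point of the MC chain.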

positions_history, energy_history, acceptance_rate = MC.simulate(
    init_pos=positions[random_int],
    init_en=energies[random_int],
    model=model,
    pcas=pcas,
    desc_scalers=scalers,
    en_scaler=params['scalers']['energies_scaler'],
    soap=params['soap'],
    steps=steps,
    delta=params['Monte-Carlo']['box_size'],
    T=params['Monte-Carlo']['temperature'],
    dataset=params['dataset']
)

print('Acceptance rate:', acceptance_rate)

print('Comparing Monte Carlo and MD...')

# Distance between the two oxygen atoms
distances_MD = np.linalg.norm(positions[:,0] - positions[:,1], axis=1)
distances_MC = np.linalg.norm(positions_history[:,0] - positions_history[:,1], axis=1)
Example #9
# Run this file to see that our code works

import QL
import MC

print("Running Monte-Carlo learning algorithm for 1000 episodes...")
print("Please exit Monte-Carlo graph to continue")
MC.main(1000)

print("Running Q-learning learning algorithm for 1000 episodes...")
QL.main(1000)
Example #10
                               [-1, 0, -1]],
                              [[0, 2, 0], [1, 1, 0], [-1, 1, 0], [0, 1, 1],
                               [0, 1, -1]],
                              [[0, -2, 0], [1, -1, 0], [-1, -1, 0], [0, -1, 1],
                               [0, -1, -1]],
                              [[0, 0, 2], [1, 0, 1], [-1, 0, 1], [0, 1, 1],
                               [0, -1, 1]],
                              [[0, 0, -2], [1, 0, -1], [-1, 0, -1], [0, 1, -1],
                               [0, -1, -1]]])
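# Maps each neighbor index to the index of the opposite direction.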
reverse_neigh_indices = {0: 1, 1: 0, 2: 3, 3: 2, 4: 5, 5: 4}

# call Cython implementation

print("Start Cython Metropolis with {0} steps".format(maxsteps))
tstart = time.time()
lattice = MC.metropolis(maxsteps, N, lattice, u11, u00, kT)
tend = time.time()
print("Took {0} s".format(tend - tstart))
"""
#p.ion()
# p.figure()
# p.imshow(lattice, interpolation='none')
# p.suptitle("Initial configuration")
# p.show()
#p.draw()
#time.sleep(.5)
printflag = -1
tstart = time.time()
tdelta = 0.
for i in range(maxsteps):
    curr_pos = n.random.randint(N, size=3) # pick random coordinates to update
Example #11
def difference(delta):
    x = MC.runMC(MCcycles2, delta, idum, alpha)
    return x.accepted * 1.0 / MCcycles2 - .5  #We want 50% accepted moves
Example #12
#Written by Magnar K. Bugge

from math import sqrt
from numpy import linspace
import pypar
import MC

nprocs = pypar.size()  #Number of processes
myid = pypar.rank()  #Id of this process

MCcycles = 10000000  #Number of MC cycles
MCcycles2 = 10000  #Number of MC cycles for determination of optimal delta
delta_min = .01  #Minimum length of Metropolis step
delta_max = 2.0  #Maximum length of Metropolis step
tolerance = .01
idum = MC.seed() * (myid + 1)  #Seed for random number generator (different for each process)


#Function which should be close to zero for optimal delta
def difference(delta):
    x = MC.runMC(MCcycles2, delta, idum, alpha)
    return x.accepted * 1.0 / MCcycles2 - .5  #We want 50% accepted moves


#Array of alpha values
values = linspace(1.4, 2.0, 13)  #(alpha values)

if myid == 0:
    outfile = open('data', 'w')
Example #13
from utils import *
from joblib import Parallel, delayed
from MC import *
import numpy.matlib, argparse

parser = argparse.ArgumentParser(description='')
parser.add_argument('--episodes', type=int, default=int(1e8), help='')
args = parser.parse_args()

env = gym.make('FrozenLake-v0')
env.reset()
N = env.observation_space.n
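# The discount factor is passed as a callable; here it always returns 0.95.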
gamma = lambda x: 0.95

target_policy = np.matlib.repmat(np.array([0.2, 0.3, 0.3, 0.2]).reshape(1, 4), env.observation_space.n, 1)

# get ground truth expectation, variance and stationary distribution
filename = 'frozenlake_truths_heuristic_%g.npz' % args.episodes
try:
    loaded = np.load(filename)
    true_expectation, true_variance, stationary_dist = loaded['true_expectation'], loaded['true_variance'], loaded['stationary_dist']
except FileNotFoundError:
    true_expectation, true_variance, return_counts = MC(env, args.episodes, target_policy, target_policy, gamma)
    stationary_dist = return_counts / np.sum(return_counts)
    np.savez(filename, true_expectation=true_expectation, true_variance=true_variance, stationary_dist=stationary_dist)
Example #14
    plt.ylabel('Win Rate')
    plt.title('Summary of ' + a1Name + ' and ' + a2Name + ' over episodes')
    plt.legend(loc="upper left")
    plt.show()


if __name__ == '__main__':
    mcAverageMoves = []
    qlAverageMoves = []
    rAverageMoves = []
    for i in range(50, 1001, 50):
        aveMC = []
        aveQL = []
        aveR = []
        for j in range(20):
            b1 = MC.main(i)
            mcMoves = totalMoves(b1)
            aveMC.append(mcMoves)

            b2 = QL.main(i)
            qlMoves = totalMoves(b2)
            aveQL.append(qlMoves)

            rAgent = Agent(h, w)
            randomAgent(rAgent)
            rMoves = totalMoves(rAgent.enemyBoard)
            aveR.append(rMoves)

        #arrays of shape (20,20). 20 episodes and 20 games per episode
        mcAverageMoves.append(aveMC)
        qlAverageMoves.append(aveQL)
Example #15
#Variational Monte Carlo program for the Helium atom which utilizes the code in MC.cpp
#Written by Magnar K. Bugge

from math import sqrt
from numpy import linspace
import MC

MCcycles = 100000000 #Number of MC cycles
MCcycles2 = 10000 #Number of MC cycles for determination of optimal delta
delta_min = .01 #Minimum length of Metropolis step
delta_max = 2.0 #Maximum length of Metropolis step
tolerance = .01
idum = MC.seed() #Seed for random number generator

#Function which should be close to zero for optimal delta
def difference(delta):
    x = MC.runMC(MCcycles2,delta,idum,alpha)
    return x.accepted*1.0/MCcycles2 - .5 #We want 50% accepted moves

#Array of alpha values
values = linspace(1.4,2.5,23) #(alpha values)

outfile = open('data','w')

#Loop over alpha values
for alpha in values:

    #Determination of optimal delta value (for each alpha), i.e.
    #finding the zero-point of the difference function by the bisection method
    minimum = delta_min
    maximum = delta_max
Example #16
myid = pypar.rank() #Id of this process

class result:
    alpha = 0.0
    E = 0.0
    sigma = 0.0
    error = 0.0
    acceptance = 0.0
    id = 0 #id of the process who did this job

MCcycles = 100000000 #Number of MC cycles
MCcycles2 = 10000 #Number of MC cycles for determination of optimal delta
delta_min = .01 #Minimum length of Metropolis step
delta_max = 2.0 #Maximum length of Metropolis step
tolerance = .01
idum = MC.seed() * (myid+1) #Seed for random number generator (different for each process)

#Function which should be close to zero for optimal delta
def difference(delta):
    x = MC.runMC(MCcycles2,delta,idum,alpha)
    return x.accepted*1.0/MCcycles2 - .5 #We want 50% accepted moves

#Function for sorting the results from small to large alpha
def sort(results):
    sorted_results = []
    
    for i in range(len(values)):
        sorted_results.append(result())
        sorted_results[i].alpha = values[i]
        j = 0
        while results[j].alpha != sorted_results[i].alpha:
Example #17
#Variational Monte Carlo program for the Helium atom which utilizes the code in MC.cpp
#Written by Magnar K. Bugge

from math import sqrt
from numpy import linspace
import MC

MCcycles = 100000000  #Number of MC cycles
MCcycles2 = 10000  #Number of MC cycles for determination of optimal delta
delta_min = .01  #Minimum length of Metropolis step
delta_max = 2.0  #Maximum length of Metropolis step
tolerance = .01
idum = MC.seed()  #Seed for random number generator


#Function which should be close to zero for optimal delta
def difference(delta):
    x = MC.runMC(MCcycles2, delta, idum, alpha)
    return x.accepted * 1.0 / MCcycles2 - .5  #We want 50% accepted moves


#Array of alpha values
values = linspace(1.4, 2.5, 23)  #(alpha values)

outfile = open('data', 'w')

#Loop over alpha values
for alpha in values:

    #Determination of optimal delta value (for each alpha), i.e.
    #finding the zero-point of the difference function by the bisection method
Example #18
    def print_evaluation(self):
        print("value matrix")
        for valuable_A in range(2):
            print("valuable A", valuable_A)
            for dealer_card in range(10):
                for player_sum in range(10):
                    state_idx = (player_sum*10+dealer_card)*2 + valuable_A
                    print("{:.3f}".format(self.q[state_idx][self.p[state_idx]]), end=' ')
                print()
    
    def print_improvement(self):
        print("policy matrix")
        for valuable_A in range(2):
            print("valuable A", valuable_A)
            for dealer_card in range(10):
                for player_sum in range(10):
                    state_idx = (player_sum*10+dealer_card)*2 + valuable_A
                    print(self.p[state_idx], end=' ')
                print()

if __name__ == "__main__":
    test = myAgent((21-12+1)*10*2, 2, 1.0)

    # for i in range(10):
    #     print("=================", "episode", i, "=================")
    #     test.new_episode()

    method = MC.algo(test, 0.0001)
    method.MC_control(100)  # show=True
    test.print_evaluation()
    test.print_improvement()
Example #19
i = 0
x = list(filter(None, x))  # drop empty entries
isin = []
#print(x)
stockCodes = getStockCodes(x, isin)
print(stockCodes)
sentimentList = []
BValue = []
TDebt = []
TAssets = []
NProfit = []
NProfit_Qtr = []
currentSharePrice = []
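# MC.funcMC below presumably fills these (initially empty) lists in place,
# one entry per stock code.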

MC.funcMC(currentSharePrice, sentimentList, BValue, TDebt, TAssets, NProfit,
          NProfit_Qtr, isin, stockCodes)
formatData(currentSharePrice, sentimentList, BValue, TDebt, TAssets, NProfit,
           NProfit_Qtr)
#print(BValue)
#print(TDebt)
#print(TAssets)
#print(NProfit)
#print(NProfit_Qtr)
print(x)
getStockData(x, stockCodes, sentimentList, BValue, TDebt, TAssets, NProfit,
             NProfit_Qtr)

#print(isin)
#getDataFromMC(x)
#print(x);
#a = price[0].split(',')
Example #20
import MC
import parse_pos
import proj

list_pos = parse_pos.recup_pos()

Xchap = MC.MC(list_pos)
j = 0
for i in range(len(list_pos)):
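    # Xchap stacks the adjusted coordinates as consecutive (X, Y, Z) triplets,
    # one triplet per parent-less point.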
    if not list_pos[i].parent:
        list_pos[i].X_MC = Xchap[j]
        j+=1
        list_pos[i].Y_MC = Xchap[j]
        j+=1
        list_pos[i].Z_mc = Xchap[j]
        j+=1
        list_pos[i].add_to_file("resultats.csv")

points = proj.lecture("resultats.csv")
pr = proj.choix_proj_cc(points)

print('Parameters of the conformal conic projection minimizing the linear scale factor:\n', 'Phi0 =',\
      rad_to_deg(pr.phi0), '\n', 'Phi1 =', rad_to_deg(pr.phi1), '\n', 'Phi2 =', rad_to_deg(pr.phi2),\
      '\n', 'X0 :', pr.X0, 'Y0 :', pr.Y0, '\n', "reference ellipsoid WGS 84")

proj.affiche(points, pr)

Example #21
import easy21
import MC
from plot import *
import pickle 


# Run Monte-Carlo Simulation

RERUN = True

if RERUN:
	mc = MC.MonteCarlo21(N0=100.0, every_visit=False)
	for i in range(1000000):
		mc.run_episode()
		# if i % 1000 == 0:
		# 	print(mc._action_state_values[0:2,:,:])
		# 	mc.plot_optimal_value_function()
	with open('mc.pickle', 'wb') as f:
		pickle.dump(mc, f)
else:
	with open('mc.pickle', 'rb') as f:
		mc = pickle.load(f)

plot_optimal_value_function(mc._action_state_values, mc._action_state_counts, show=True)
Example #22
#Written by Magnar K. Bugge

from math import sqrt
from numpy import linspace
import pypar
import MC

nprocs = pypar.size() #Number of processes      
myid = pypar.rank() #Id of this process

MCcycles = 10000000 #Number of MC cycles
MCcycles2 = 10000 #Number of MC cycles for determination of optimal delta
delta_min = .01 #Minimum length of Metropolis step
delta_max = 2.0 #Maximum length of Metropolis step
tolerance = .01
idum = MC.seed() * (myid+1) #Seed for random number generator (different for each process)

#Function which should be close to zero for optimal delta
def difference(delta):
    x = MC.runMC(MCcycles2,delta,idum,alpha)
    return x.accepted*1.0/MCcycles2 - .5 #We want 50% accepted moves

#Array of alpha values
values = linspace(1.4,2.0,13) #(alpha values)

if myid == 0:
    outfile = open('data','w')

#Loop over alpha values
for alpha in values:
Example #23
from __future__ import print_function

from timeit import default_timer as timer
import MC

if __name__ == '__main__':
    start = timer()
    print('Starting')
    MC.lunchPacketwithBatch(nPhotonsRequested=1e6)
    end = timer()
    print('Finished, elapsed in %ss' % (end - start))
Example #24
def difference(delta):
    x = MC.runMC(MCcycles2,delta,idum,alpha)
    return x.accepted*1.0/MCcycles2 - .5 #We want 50% accepted moves
Example #25
                                      lambda x: onehot(x, N),
                                      N,
                                      int(1e6),
                                      gamma=0.99)
    stationary_dist = return_counts / np.sum(return_counts)
    true_expectation, true_variance = E[-1], V[-1]
    np.savez(filename,
             true_expectation=true_expectation,
             true_variance=true_variance,
             stationary_dist=stationary_dist)

j, v, s = iterative_policy_evaluation(env, target_policy, gamma=gamma)
print('Iterative policy evaluation')

Lambda = LAMBDA(env, lambda_type='constant', initial_value=np.ones(N))
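# initial_value=np.ones(N) fixes lambda at 1 in every state, so this MC
# evaluation uses full Monte-Carlo returns.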
mc_j, mc_v, mc_counts = MC(env, 10000, target_policy, target_policy, Lambda,
                           gamma)

# test both on-policy and off-policy
Lambda = LAMBDA(env, lambda_type='constant', initial_value=np.ones(N))
off_mc_results, off_mc_var_results = eval_method_with_variance(
    MC,
    env,
    true_expectation,
    true_variance,
    stationary_dist,
    behavior_policy,
    target_policy,
    Lambda,
    gamma=gamma,
    alpha=0.05,
    beta=0.05,
Example #26
                line = line.strip()
                line = line[1:-1].split(",")
                line = list(map(int, line))
                coordinates.append(line)
    return [seq, coordinates]


def create_protein(seq, coordinates):
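    # Build one Residue per sequence position and collect the occupied lattice
    # coordinates as a set of tuples.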
    protein = []
    for n, res in enumerate(seq):
        protein.append(
            residue.Residue("residue" + str(n + 1), res, coordinates[n], n))
    protein_coordinates = set()
    for res in protein:
        protein_coordinates.add(tuple(res.coordinates))
    return [protein, protein_coordinates]


if __name__ == "__main__":

    argvals = None
    args = get_args(argvals)
    seq, coordinates = get_seq(args.input_file)
    protein, protein_coordinates = create_protein(seq, coordinates)
    crankshaft = bool(args.crankshaft)
    MC.MCsearch(protein, protein_coordinates, crankshaft, args.steps,
                args.output_pdb)
Example #27
                reload(MC)
                reload(evil)
                reload(logs)

            if config.config.enablePM == True:
                if channel == botnick:
                    channel = user
            op.runOpCommands(ops,hostmask,channel,nick,commandChar,ircmsg,ircsock)
            calc.runCommands(channel,nick,commandChar,ircmsg,ircsock)
            BowserBucks.runCommands2(channel,nick,commandChar,ircmsg,ircsock,hostmask)
            General.runCommands(channel,nick,commandChar,ircmsg,ircsock,ops,hostmask)
            config.adminCommands(hostmask,user,channel,ops,commandChar,ircsock,ircmsg,config.config)
            config.changeConfig(hostmask,user,channel,ops,commandChar,ircsock,ircmsg,config.config)
            games.runCommands(channel,nick,commandChar,ircmsg,ircsock,hostmask)
            word.runCommands(channel,nick,commandChar,ircmsg,hostmask,ircsock)
            MC.runCommands(ircmsg,commandChar,ircsock,channel,hostmask,nick)
            evil.runCommands(hostmask,user,channel,ops,commandChar,ircsock,ircmsg)
    except Exception:
        pass

    if config.config.enableAutocorrect == True:
        sendmsg(channel, autocorrect.correctMsg(ircmsg.split(" :",1)[1]))

    if config.config.enablePM == True:
        if channel == botnick:
            channel = user

    stats.phraseText(ircmsg,commandChar,botnick,user,hostmask,channel)
    stats.botStats(ircmsg,commandChar,botnick,user,hostmask,channel)
    MC.phraseText(ircmsg,commandChar,botnick,user,hostmask,channel)

    if TextPhrase.phraseText(ircmsg,commandChar,botnick,user,hostmask,channel) != [""]: