import pickle
from biasedPerceptron import BiasedPerceptron, diff
import anotherStatus as fw
import calling_station
import betting_station
import time
import numpy as np

name = 'frenzy_perceptron_.0005_vs_frenzy.p'
start = time.time()
ALPHA = 0.0005
LAMBS = [0.8, 0.85, 0.9, 0.95, 1]
n_train = 500000

csBot = calling_station.Calling_station()
bsBot = betting_station.Betting_station()
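# build a fresh pair of perceptron-backed players for each value of LAMB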
for LAMB in LAMBS:
    net = BiasedPerceptron(fw.n_in,
                           fw.n_hidden,
                           fw.n_out,
                           alpha=ALPHA,
                           lamb=LAMB,
                           randomInit=True)
    net2 = BiasedPerceptron(fw.n_in,
                            fw.n_hidden,
                            fw.n_out,
                            alpha=ALPHA,
                            lamb=LAMB,
                            randomInit=True)
    auto = fw.AnotherAutoPlayer(net, name="superbot")
    ai = fw.AnotherAutoPlayer(net2, name='cpu', frenzy=1)
Example #2
            next = self.status.check_fold()
            action = "CheckFold"
        self.status = next.copy()
        # update the opponent's status vector to reflect this player's action
        player2.status.vec_act[stage][1] = self.status.vec_act[stage][0]
        player2.status.vec_act[stage][2] = self.status.vec_act[stage][2]
        player2.status.stage = self.status.stage
        return action


import pickle
import calling_station
#    auto= pickle.load(open("player.p", "rb"))
#    net= UnbiasedNet.NeuralNet(fw.n_in, fw.n_hidden, fw.n_out,
#                               alpha=0.001,
#                               lamb=0.5, randomInit=False)
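# use a calling-station bot as the fixed opponent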
auto = calling_station.Calling_station()
n_train = 50000
for LAMB in [0.7, 0.8, 0.9, 1.0]:
    #net=UnbiasedNet.NeuralNet(fw.n_in, fw.n_hidden,
    #                          fw.n_out, alpha=0.001, lamb=LAMB,
    #                          randomInit=True)

    #auto2= fw.AnotherAutoPlayer(net, name="against_TA")
    #for i in range(3):
    #    auto2.train(n_train, auto, debug=0, frenzy=1)
    #    pickle.dump( auto2, open(str(i) + str(LAMB) +
    #                             'another_vs_TA.p', 'wb') )
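    # load the last saved snapshot (generation 2) for this LAMB value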
    print "LAMB " + str(LAMB)
    auto2 = pickle.load(open(str(2) + str(LAMB) + 'another_vs_TA.p', 'rb'))
    win = []
    for i in range(10):
Example #3
import pickle
import time
import numpy as np
import calling_station
import UnbiasedNet
import framework
from framework import Auto_player

net = UnbiasedNet.NeuralNet(framework.n_in,
                            framework.n_hidden,
                            framework.n_out,
                            alpha=0.001,
                            lamb=0.9,
                            momentum=0.1)
net2 = UnbiasedNet.NeuralNet(framework.n_in,
                             framework.n_hidden,
                             framework.n_out,
                             alpha=0.001,
                             lamb=0.9,
                             momentum=0.1)
auto = Auto_player(net, name="nova")
#auto= pickle.load(open("nova.p", "rb"))
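# train "nova" against a calling-station opponent, then pickle the trained player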
cs = calling_station.Calling_station()
auto.train(3000, cs)
pickle.dump(auto, open("nova.p", "wb"))
#auto = Auto_player(net, name= "Moon")
#auto2= Auto_player(net2, name= "noname")
#auto.train(4000,auto2)
#pickle.dump(auto, open("moon.p", "wb"))

#now test
#auto= pickle.load(open("moon.p", "rb"))
#wins=[]
#for i in range(40):
#    wins.append(auto.compete(auto2, 2000, debug=0))
#print np.mean(wins)
#print np.std(wins)
import shorter_framework as sfw

name = 'shorter_frenzy_vs_call.p'
start = time.time()
ALPHA = 0.005
LAMBS = [0.9]
n_train = []
n_in = 208
n_hidden = 150
n_out = 1

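# for each LAMB, keep training against the calling station in blocks of 1000,
# decaying alpha slightly each block, until the weight change between
# consecutive blocks (UnbiasedNet.diff) falls below 1e-4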
for LAMB in LAMBS:
    net = UnbiasedNet.NeuralNet(n_in, n_hidden, n_out,
                                alpha=ALPHA, lamb=LAMB, randomInit=True)
    auto = sfw.shorter_Auto_player(net, name="shorter_against_call")
    csbot = calling_station.Calling_station()
    distance = 10
    i = 0
    while distance > 0.0001:
        oldnet = auto.net.deepcopy()
        auto.net.alpha /= 1.005
        auto.train(1000, csbot, debug=0, frenzy=1, recover_rate=0)
        distance = UnbiasedNet.diff(auto.net, oldnet)
        i += 1000
    print "number of training iterations:", i
    pickle.dump(auto, open(str(LAMB) + name, "wb"))
    n_train.append(i)
print "the training used time", time.time()-start

j = 0
for LAMB in LAMBS:
Example #5
import pickle
import framework
from framework import Status
from framework import Auto_player
import UnbiasedNet
import numpy as np
import calling_station
from cheater_bot import Cheater_player

#auto=pickle.load(open("9th_gen_frenzy_vs_calling.p", "rb"))
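# load the previously trained player that will be evaluated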
auto = pickle.load(open("twinB.p", "rb"))
#net2=UnbiasedNet.NeuralNet(framework.n_in ,framework.n_hidden,
#                           framework.n_out, True)
#auto2=framework.Auto_player(net2)
#cheater= Cheater_player()
cheater = calling_station.Calling_station()
wins = []
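# run 40 independent compete() evaluations against the calling station
# and report the mean and standard deviation of the results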
for i in range(40):
    wins.append(auto.compete(cheater, 100, debug=0))
    #print wins
print np.mean(wins)
print np.std(wins)
#print auto.net.n_in, auto.net.n_hidden, auto.net.n_out, auto.net.w_out
#print auto2.net.w_out
#print auto.net.w_in[1], auto2.net.w_in[1]