Example #1
def training(PolicyNetwork,
             epochs=10,
             eta=0.001,
             batch_size=5,
             error_function=0,
             file="dan_data_10",
             adaptive_rule='linear',
             sample_proportion=1,
             db=False,
             db_name="none",
             details=True):
    t = time.time()
    if not db:
        testset = TrainingDataSgfPass("dgs", file)
        init_error = PolicyNetwork.propagate_set(testset, db, adaptive_rule,
                                                 error_function)
    else:
        # extract_batches_from_db returns [number_of_batches, batch_sets]
        [number_of_batches, batch_sets] = PolicyNetwork.extract_batches_from_db(
            db_name, batch_size, sample_proportion)
        testset = [number_of_batches, batch_sets]
        init_error = "TODO"  # TODO do this. Problem: Sett might not contain all games (sample-prop!=1)

    if details:
        print("Propagation and import of the set took",
              np.round(time.time() - t, 3), "seconds.")
        print("Learning is done for", epochs, "epochs, with batch size",
              batch_size, ",eta", eta, ",error function number",
              error_function, "and with games given by the file ", file, ".")
        t = time.time()
        print("Learning in progress...")

    errors_by_epoch = PolicyNetwork.learn(testset, epochs, eta, batch_size,
                                          sample_proportion, error_function,
                                          db, db_name, adaptive_rule)

    if not db:
        testset = TrainingDataSgfPass("dgs", file)
        final_error = PolicyNetwork.propagate_set(testset, db, adaptive_rule,
                                                  error_function)
    else:
        final_error = "TODO"  # TODO do this
    if details:
        print("Finished learning.")
        print("Details on the results:")
        print('Initial Error:', init_error)
        print('Final Error:', final_error)
        print("Total time needed for training:", time.time() - t)

        print("Visualization:")
        plt.figure(0)
        plt.plot(range(0, len(errors_by_epoch)), errors_by_epoch)
        plt.title("Error in each epoch")
        plt.xlabel("epochs")
        plt.ylabel("Error")
        print("Error was measured with error function number",
              str(error_function))
        plt.show()

    return errors_by_epoch
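
A minimal usage sketch for this helper (assuming a PolicyNet instance and the "dan_data_10" file from the other examples; the parameter values are purely illustrative):

# Hypothetical call of training(); PolicyNet and "dan_data_10" are assumed
# to be available as in the other examples.
PN = PolicyNet()
errors = training(PN, epochs=5, eta=0.001, batch_size=10,
                  error_function=0, file="dan_data_10", details=True)
print("Errors per epoch:", errors)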
Example #2
    def Learnsplit(self, trainingdata, eta, batch_size, stoch_coeff,
                   error_function, trainingrate, error_tolerance, maxepochs):
        # Split the data into a training part and a held-out test part.
        N = len(trainingdata.dic)
        splitindex = int(round(N * trainingrate))
        trainingset, testset = TrainingDataSgfPass(), TrainingDataSgfPass()
        trainingset.dic = dict(list(trainingdata.dic.items())[:splitindex])
        testset.dic = dict(list(trainingdata.dic.items())[splitindex:])

        error = [error_tolerance + 1]
        epochs = 0
        while error[-1] > error_tolerance and epochs < maxepochs:
            epochs += 1
            # Train on the training split only, then measure the error on the test split.
            self.Learn(trainingset, 1, batch_size, stoch_coeff, error_function)
            error.append(self.PropagateSet(testset, error_function))
        return [error, epochs]
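
A rough usage sketch for Learnsplit (assuming it is a PolicyNet method, as the calls to self.Learn and self.PropagateSet suggest; all numeric values are illustrative):

# Hypothetical call: 80/20 train/test split, stop once the test error falls
# below 0.5 or after at most 50 epochs.
PN = PolicyNet()
data = TrainingDataSgfPass("dgs", "dan_data_10")
[errors, epochs_used] = PN.Learnsplit(data, eta=0.01, batch_size=100,
                                      stoch_coeff=1, error_function=0,
                                      trainingrate=0.8, error_tolerance=0.5,
                                      maxepochs=50)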
Example #3
def test3():
    PN = PolicyNet()
    print("Hello there")
    #PN.LearnDB(dbName='dan_data_295_db', sample_proportion=0.01, eta_start=0.01, stoch_coeff=1,error_function=0)
    testset = TrainingDataSgfPass(folder="dgs", id_list='dan_data_295')
    print(len(testset.dic))
    error = PN.PropagateSetAdaptive(testset)
    print("Error:", error)
Example #4
    def splitintobatches(self, trainingdata, batchsize):
        # Splits trainingdata into batches of size batchsize; the last batch may be smaller.
        N = len(trainingdata.dic)
        if batchsize > N:
            batchsize = N
        k = int(np.ceil(N / batchsize))

        Batch_sets = [0] * k
        for i in range(k - 1):
            Batch_sets[i] = TrainingDataSgfPass()
            Batch_sets[i].dic = dict(list(trainingdata.dic.items())[i * batchsize:(i + 1) * batchsize])
        Batch_sets[k - 1] = TrainingDataSgfPass()
        Batch_sets[k - 1].dic = dict(list(trainingdata.dic.items())[(k - 1) * batchsize:N])
        number_of_batchs = k
        return [number_of_batchs, Batch_sets]
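
A short sketch of how the batch splitter might be used (assuming a PolicyNet instance and the SGF data from the other examples; the batch size is illustrative):

# Hypothetical usage: split an imported SGF set into batches of 100 games.
PN = PolicyNet()
data = TrainingDataSgfPass("dgs", "dan_data_10")
[number_of_batchs, Batch_sets] = PN.splitintobatches(data, 100)
print("Number of batches:", number_of_batchs)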
Example #5
def train_dict(layers=[9 * 9, 1000, 200, 9 * 9 + 1], filter_ids=[0, 1, 2, 3, 4, 5, 6, 7], batch_size=100, eta=0.001,
               err_fct=0, duration_in_hours=8, custom_save_name="none", adaptive_rule="logarithmic",
               regularization=0, momentum=0):
    print("This Script will generate a PolicyNet and train it for a certain time.")
    print("For this, Dictionaries are used. No DBs are involved.")
    print("")
    print("Info:")
    print("Layers ", layers)
    print("filter_ids ", filter_ids)
    print("batch_size", batch_size)
    print("eta", eta)
    print("err_fct", err_fct)
    print("duration_in_hours", duration_in_hours)
    print("custom_save_name", custom_save_name)
    print("adaptive_rule", adaptive_rule)
    print("Regularization parameter:", regularization)
    print("Momentum parameter:", momentum)
    print("")
    PN = PolicyNet(layers=layers, filter_ids=filter_ids)
    testset = TrainingDataSgfPass("dgs", "dan_data_10")
    print("Games have been imported from dan_data_10.")
    [number_of_batchs, batchs] = PN.splitintobatches(testset, batch_size)
    print("Split up into", number_of_batchs, "Batches with size", batch_size, ".")
    errors_by_epoch = []
    init_error = PN.propagate_set(testset, False, adaptive_rule, err_fct)
    start = time.time()
    epoch = 0
    print("Training process starts now. It will take ", duration_in_hours, " hours.")
    while time.time() - start < duration_in_hours * 60 * 60:
        t = time.time()
        errors_by_epoch.append(0)
        for i_batch in range(0, number_of_batchs):
            error_in_batch = PN.learn_batch(batchs[i_batch], eta, err_fct, False, adaptive_rule, True, regularization, momentum)
            errors_by_epoch[epoch] += error_in_batch
        errors_by_epoch[epoch] = errors_by_epoch[epoch] / number_of_batchs
        print("Epoch", epoch, "with error", errors_by_epoch[epoch])
        print("Time needed for epoch in seconds:", np.round(time.time() - t))
        epoch = epoch + 1
    print("")
    if custom_save_name == "none":
        save_name = ("weights" + str(duration_in_hours) + "hours"
                     + "".join(str(x) for x in filter_ids) + "filtids"
                     + str(epoch) + "epochs")
    else:
        save_name = custom_save_name
    PN.saveweights(save_name)
    total_time = time.time() - start
    print("Total time taken for training:", total_time, "and epochs", epoch)
    print("Average time per epoch:", total_time / epoch)
    print("Initial error:", init_error)
    print("Final error:", errors_by_epoch[-1])
    improvement = init_error - errors_by_epoch[-1]
    print("Total error improvement:", improvement)
    print("Error development: ", errors_by_epoch)
    print("Error reduction per second:", improvement / total_time)
    plt.plot(range(0, len(errors_by_epoch)), errors_by_epoch)
    plt.show()
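
A sketch of a possible call, assuming the module-level names used above; the duration and save name are illustrative:

# Hypothetical call: train a small net for one hour and save the weights
# under a custom name.
train_dict(duration_in_hours=1, custom_save_name="weights_test_1h")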
Example #6
def test4():
    PN = PolicyNet()
    PNA = PolicyNet()
    testset = TrainingDataSgfPass("dgs",range(10))
    err=PN.PropagateSet(testset)
    errA=PNA.PropagateSetAdaptive(testset)
    print(err,errA)
    
    epochs=3
    e1=PN.Learn(testset,epochs,0.01,10,1,0)
    print("first done")
    e2=PNA.Learn(testset,epochs,0.01,10,1,0)
    print(e1,e2)
    
    
#test4()
Example #7
def test2():  # passing moves
    PN = PolicyNet()
    testset = TrainingDataSgfPass("dgs", 'dan_data_10')
    error = PN.Learn(testset, 2)
    print("No batches: error", error)
Example #8
def test1():
    PN = PolicyNet()
    testset = TrainingDataSgfPass("dgs", 'dan_data_10')
    epochs = 2
    error_by_epoch = PN.Learn(testset, epochs)
    plt.plot(range(0, epochs), error_by_epoch)
    plt.show()  # display the learning curve
Example #9
            '\n' + 'seconds per epoch: ' + str(seconds_per_epoch))
        fclient.close()
    else:
        print('........................')
        print('Please choose another filename! This is already taken.')

# Stefan:
if your_name == "Stefan":
    # this is where I put my training code
    print("halo I bims")

    training_program = 11

    if training_program == 1:  # Checking error on a testset, while training on a different set
        PN = PolicyNet()
        testset = TrainingDataSgfPass("dgs", "dan_data_10")
        trainingset = TrainingDataSgfPass("dgs", "dan_data_295")
        start = time.time()
        errors_by_training = []
        while time.time() - start < 0.5 * 60 * 60:  #half hour
            errors_by_epoch = PN.Learn(trainingset, 5, 0.01, 200, 0.8, 0)
            errors_by_training.append(errors_by_epoch)

    if training_program == 2:  #standard batch Dan 10
        trainingdata = TrainingDataSgfPass("dgs", "dan_data_10")
        batch_size = 100
        eta = 0.01
        stoch_coeff = 1
        epochs = 20
        error_function = 0
        activation_function = 0