Example #1
    def RL_MLP(self, TopN, maxiter, ep_decay, paralistRL, Symbol, NetworkType,
               PorPV, TRCost, Tau_w, Tau_s):

        ### GET TRAINING, VALIDATION, and TEST DATA
        pathsTRVL, pathsTEST = self.getData(NetworkType, PorPV)

        # length of input vector
        if PorPV == 'VP':
            W = 32
        elif PorPV == 'VPV':
            W = 64
        H = 1

        ### START TRAINING ###
        #################################################################################
        for f in range(len(pathsTRVL)):
            XTr, YTr, XVal, YVal = self.DRead.setData_TRVL(pathsTRVL[f])
            trm = TR.trainModel(1.0, 0.1, ep_decay, self.num_actions, 0.99, maxiter)
            trm.set_Data(XTr, YTr, XVal, YVal)

            # MLP
            Net = oM.constructDense(self.num_actions, W)
            Net_T = oM.constructDense(self.num_actions, W)
            state = tf.random_uniform([1, W], 0, 1)

            # copy weights
            Net(state, False)
            Net_T(state, False)
            Net.save_weights('./para/Network')
            Net_T.load_weights('./para/Network')

            # start training
            trm.start_training(Symbol, Net, Net_T, W, H, f, paralistRL, TopN)

        ### START TESTING ###
        ##################################################################################
        tsm = TS.testModel(self.num_actions)
        WtList = tsm.get_foldername_list('./' + Symbol + '/weights/')

        initAsset = 1.0
        for f in range(len(pathsTEST)):
            XVal, XTest, YTest, WTest = self.DRead.setData_Test(pathsTEST[f])
            tsm = TS.testModel(self.num_actions)
            tsm.set_Data(XVal, XTest, YTest, WTest)

            Net = oM.constructDense(self.num_actions, W)
            initAsset = tsm.start_testing(Symbol, Net, W, H, f, WtList[f],
                                          Tau_w, Tau_s, initAsset,
                                          (TRCost / 2.0))
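The save_weights/load_weights round trip above is one way to copy the online network's parameters into the target network. Below is a minimal sketch of the same synchronization done in memory, assuming Net and Net_T are tf.keras models of identical architecture; constructDense here is an illustrative stand-in, not the repository's oM.constructDense.

import tensorflow as tf

def constructDense(num_actions, w):
    # Illustrative stand-in for oM.constructDense: a small MLP taking w inputs.
    return tf.keras.Sequential([
        tf.keras.Input(shape=(w,)),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(num_actions),
    ])

num_actions, W = 3, 32
Net = constructDense(num_actions, W)     # online network
Net_T = constructDense(num_actions, W)   # target network

# Copy the online weights into the target network without touching the disk.
Net_T.set_weights(Net.get_weights())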
Example #2
def main():
    startTime = datetime.now()
    train_data, train_labels, bestN, bestK = trainModel()
    print('finished training the model', datetime.now() - startTime)
    carList = testModel(train_data, train_labels, bestN, bestK)
    print('finished', datetime.now() - startTime)

    print(carList)
    return carList
Example #3
def main():
    parser = OptionParser()
    parser.add_option("-p", "--testpath", dest="test_path", help="provide the test root path")
    parser.add_option("--resultpath", dest="result_path", help="provide the result path")
    (options, args) = parser.parse_args()
    test_path = options.test_path
    result_path = options.result_path

    train_start = time.time()
    trainModel()
    train_end = time.time()
    elapsed_time = train_end - train_start
    print(humanTime(elapsed_time))

    test_start = time.time()
    testModel(test_path, result_path)
    test_end = time.time()
    elapsed_time = test_end - test_start
    print(humanTime(elapsed_time))
Example #4
    def SL_MLP(self, TopN, maxiter, paralistSL, Symbol, NetworkType, PorPV,
               TRCost, Tau_w, Tau_s):

        ### GET TRAINING, VALIDATION, and TEST DATA
        pathsTRVL, pathsTEST = self.getData(NetworkType, PorPV)

        # length of input vector
        if PorPV == 'VP':
            W = 32
        elif PorPV == 'VPV':
            W = 64
        H = 1

        ### START TRAINING ###
        #################################################################################
        for f in range(len(pathsTRVL)):
            XTr, YTr, XVal, YVal = self.DRead.setData_TRVL(pathsTRVL[f])
            trm = TR.trainModel(self.num_actions, maxiter)
            trm.set_Data(XTr, YTr, XVal, YVal)

            # MLP
            Net = oM.constructDense(self.num_actions, W)

            # start training
            trm.start_training(Symbol, Net, W, H, f, paralistSL, TopN)

        ### START TESTING ###
        ##################################################################################
        tsm = TS.testModel(self.num_actions)
        WtList = tsm.get_foldername_list('./' + Symbol + '/weights/')

        initAsset = 1.0
        for f in range(len(pathsTEST)):
            XVal, XTest, YTest, WTest = self.DRead.setData_Test(pathsTEST[f])
            tsm = TS.testModel(self.num_actions)
            tsm.set_Data(XVal, XTest, YTest, WTest)

            Net = oM.constructDense(self.num_actions, W)
            initAsset = tsm.start_testing(Symbol, Net, W, H, f, WtList[f],
                                          Tau_w, Tau_s, initAsset,
                                          (TRCost / 2.0))
Example #5
    print('training ')
    step = 77064
    BAcc = 0
    for i in range(config.START_EPOCH, config.TOTAL_EPOCHS ):
        AvgLoss = 0
        for batch_idx, (img, label) in enumerate(ds_train):
            step += 1
            lr = lr_schedule(step)
            optimizer = keras.optimizers.Adam(learning_rate=lr, beta_1=0.9)
            with tf.GradientTape() as tape:
                output = vision_transformer(img, training=True)
                loss = loss_fn(label, output)
                AvgLoss += loss
            gradients = tape.gradient(loss, vision_transformer.trainable_weights)
            optimizer.apply_gradients(zip(gradients, vision_transformer.trainable_weights))
            # show loss
            if batch_idx % config.LOG_LOSS == 0:
                AvgLoss = AvgLoss / float(config.LOG_LOSS)
                print('[epoch: %4d/ ' % i + 'EPOCHS: %4d]\t' % config.TOTAL_EPOCHS +
                      '[step: %6d/ ' % step + 'STEPS: %6d]\t' % config.TOTAL_STEPS +
                      '[loss:%.4f]' % AvgLoss + ' [learning rate: %.6f]' % lr)
                AvgLoss = 0

        if i % config.LOG_EPOCH == 0:
            acc = testModel(vision_transformer)
            if acc > BAcc:
                vision_transformer.save_weights(config.SAVE_PATH)
                BAcc = acc
                print(f'saved path: {config.SAVE_PATH}')
            print('test ---> [epoch: %4d/ ' % i + 'EPOCHS:%4d]\t' % config.TOTAL_EPOCHS +
                  '[acc:%.4f' % acc + ', BAcc:%.4f]' % BAcc)
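Note that the loop above rebuilds the Adam optimizer on every batch, which discards Adam's moment estimates at each step. A minimal sketch of constructing the optimizer once and letting it read the learning rate from the schedule itself, assuming lr_schedule is a tf.keras.optimizers.schedules.LearningRateSchedule (the CosineDecay below is only a placeholder):

from tensorflow import keras

# Placeholder schedule standing in for the lr_schedule used above.
lr_schedule = keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=1e-3, decay_steps=100000)

# Built once, outside the batch loop: Adam keeps its moment estimates across steps
# and evaluates the schedule with its own iteration counter on every apply_gradients call.
optimizer = keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9)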
Example #6
import argparse
from train import trainModel
from test import testModel

def parseArguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('-t',
                        '--train',
                        action='store_true',
                        help='train the model',
                        dest='train')

    return parser.parse_args()

if __name__ == '__main__':
    args = parseArguments()

    if args.train:
        trainModel()
    else:
        testModel()
Example #7
import argparse
import yaml

import train
import test
import torch
import numpy as np

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', dest="config", default="config.yaml")
    args = parser.parse_args()
    with open(args.config, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    if config['train']:
        model, lossHist = train.trainModel(config)
        torch.save(model.state_dict(), config['modelLoc'] + config['runName'] +
                   "Weights.pth")
        
        # Saves only the weights (a model object must be created and the state dict loaded into it)
        #torch.save(model, config['modelLoc'] + config['runName'] + "Full.pth")
        # Saves the entire model (not advised; can break in many ways due to directory issues)

    if config['test']:
        confMatrix, metrics = test.testModel(config)
        np.save(config['modelLoc'] + config['runName'] + "Confusion.npy",confMatrix)
        f = open(config['modelLoc'] + config['runName'] + "Metrics.txt",'w+')
        f.write(metrics)
        f.close()
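As the comment above notes, a state dict saved this way has to be loaded back into a freshly constructed model object before testing. A minimal sketch of that round trip; the Net class and the file name are hypothetical stand-ins for whatever train.trainModel builds and for config['modelLoc'] + config['runName'] + "Weights.pth".

import torch
import torch.nn as nn

class Net(nn.Module):
    # Hypothetical stand-in for the model class returned by train.trainModel.
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(16, 2)

    def forward(self, x):
        return self.fc(x)

model = Net()
state_dict = torch.load("Weights.pth", map_location="cpu")  # hypothetical path
model.load_state_dict(state_dict)
model.eval()  # switch to inference mode before evaluating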
        
Example #8
from sys import argv

print(argv)

if len(argv) > 2:
    raise Exception("A maximum of 1 argument can be called at a time. " + \
        "You have called %d arguments : %s" % (len(argv) - 1, str(argv[1:])))

if "-on" in argv or "--online" in argv:
    import online
    online.online()

elif "-tr" in argv or "--train" in argv:
    import train
    train.trainModel()

elif "-te" in argv or "--test" in argv:
    import test
    test.testModel()
Example #9
            torch.cuda.manual_seed(1)

        net = ConvNet(4)
        if haveCuda:
            net = net.cuda()
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, nesterov=True, weight_decay=1e-4)
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, numEpoch, eta_min=1e-2)

        for epoch in range(numEpoch):

            loss, acc = train(epoch, train_loader)
            train_accs.append(acc)
            train_losses.append(loss)

            loss, acc = val(epoch, test_loader)
            val_accs.append(acc)
            val_losses.append(loss)

            scheduler.step()

            if acc > best_acc:
                print("Best Model, Saving")
                best_acc = acc
                torch.save(net, model_dir)

        # Results
        plotResults(numEpoch, train_accs, train_losses, val_accs, val_losses)

    testModel(targets, train_loader, batch_size, model_dir)
Example #10
            for node in nodeMap.itervalues():
                f.write(unicode(node.outputAlphaVector()) + u'\n')

    if distribution:    outputAllNodeDistributions()
    if partitions:      outputAllFactorPartitions()
    if alphas:          outputAllAlphaVectors()


if __name__ == "__main__":

    alltuples = []
    with codecs.open('tuples.txt', 'r', encoding='utf-8') as f:
        def _converter(line):
            kanji, furigana = line.split()
            return (kanji, furigana)
        alltuples = map(_converter, f.readlines())

    print ' ** Training set contains %d tuples.' % (len(alltuples))

    TOTAL_TRIAL = 3
    for trial in range(TOTAL_TRIAL):
        learn(trial, alltuples)
        adjustParameters()
        outputResult(trial)

        print ' ** Start testing:'
        testModel(nodeMap)

    print ' ** All done. Output written to files.'