def trainAllTest():
    import paths, PythonVersionHandler, SparkLogFileHandler, FinalizedRunners, Trainer
    l = ['besiktas', 'kol_saati', 'iphone_7', 'iphone_7_kilif', 'nike_air_max', 'tupperware', 'stres_carki', 
    'buzdolabi', 'vestel_camasir_makinesi', 'samsung_galaxy_j7_prime', 'samsung', 'dikey_elektrikli_supurge', 
    'jbl_hoparlor', 'bisiklet', 'lenovo_k6_note', 'sandalye_kilifi', 'xiaomi', 'samsung_galaxy_s6', 
    'kirmizi_converse', 'kiz_cocuk_abiye_elbise', 'avon_kadin_parfum', 'kamp_cadiri', 'adidas', 'xiaomi_mi5',
    'samsung_galaxy_s5_mini']
    d = ['beko_9_kg_camasir_makinesi', 'kot_pantalon', 'mani_jeans_kot_pantalon']
    p = ['kadin_parfum', 'samsung_galaxy_s5_mini']
    for c, keyword in enumerate(get32Keywords()):
        PythonVersionHandler.print_logging(str(c+1)+'.', keyword.upper() + ':')
        keyword = keyword.lower().replace(' ', '_')
        #if keyword in l:
        #    PythonVersionHandler.print_logging('Weights have already been learned for this keyword')
        #    continue
        if keyword in d:
            PythonVersionHandler.print_logging('Pairs do not exist for this keyword')
            continue
        elif keyword in p:
            PythonVersionHandler.print_logging('TrainingData could not be generated for this keyword')
            continue
        pairsFolder = paths.joinPath(labeledPairsMayFromMayFolder, 'allWeek')
        pairsPath = paths.joinPath(pairsFolder, keyword + '_pairs')
        outputPath = paths.joinPath(paths.specificProductsFolder, keyword + '_products')
        productVectorFolder = outputPath
        Trainer.train(pairsPath, productVectorFolder, outputPath, saving = False)
def trainTest():
    import paths, SparkLogFileHandler, FinalizedRunners, Trainer
    keyword = 'KIZ COCUK ABIYE ELBISE'.lower().replace(' ', '_') #'galaxy_s3' #'samsung_galaxy_s5_mini'
    pairsFolder = paths.joinPath(labeledPairsMayFromMayFolder, 'allWeek')
    pairsPath = paths.joinPath(pairsFolder, keyword + '_pairs')
    outputPath = paths.joinPath(paths.specificProductsFolder, keyword + '_products')
    productVectorFolder = outputPath
    Trainer.train(pairsPath, productVectorFolder, outputPath)
def trainingTest21():
    import paths, FinalizedRunners, Trainer
    feature_names = ['photos', 'feedbackPercentage', 'memberSoldCount', 'soldCount',
            'memberSegment', 'subtitleFlag', 'brandNew', 'freeCargo', 'windowOptionFlag']
    Trainer.setFeatureVector(feature_names)
    keywords = ['besiktas', 'kol_saati', 'iphone_7', 'iphone_7_kilif']
    for keyword in keywords: 
        folder = paths.joinPath(paths.joinPath(paths.HDFSRootFolder, 'secondWeek'), keyword)
        FinalizedRunners.trainForKeyword(keyword, folder, saving = True)
def extractPairs():
    import paths, PythonVersionHandler, Trainer, ReadyTests
    feature_names = ['photos', 'soldCount', 'feedbackPercentage', 'memberSoldCount', 'memberSegment', 
                     'subtitleFlag', 'brandNew', 'freeCargo', 'dailyOffer', 'windowOptionFlag', 'price', 'productCount']
    Trainer.setFeatureVector(feature_names)
    keywords = ReadyTests.get27Keywords()[23:]
    for c, keyword in enumerate(keywords): 
        PythonVersionHandler.print_logging(str(c+1)+'.', keyword.upper() + ':')
        trainTesting(keyword)
def extendedProductExtraction(keyword = 'iphone 7', onlyFollowings = False, AllPageButId = False):
    import paths, SparkLogFileHandler, SearchExtractor, FinalizedRunners, NewProductPreferrer, PythonVersionHandler, Trainer
    outputFolder = paths.joinPath(paths.HDFSRootFolder, 'weekAugust')
    outputPath, productsPath = getLabeledPairsAndProductsPath(outputFolder, keyword, onlyFollowings = onlyFollowings, AllPageButId = AllPageButId)
    pairs = Trainer.readLabeledPairs(outputPath)
    ids = pairs.flatMap(lambda i: i[0]).distinct()
    PythonVersionHandler.print_logging(ids.count(), 'ids have been gathered from the labeled pairs by', PythonVersionHandler.nowStr())
    productVectorFolder = paths.newProductVectorFolder3
    products = Trainer.getProducts(ids, productVectorFolder)
    Trainer.saveSpecificProduct(products, productsPath)
Example #6
def main():
    # Go to script's directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    # Train on Michael dataset, evaluate with Wookie dataset
    featMat = FeatMat()
    featMat.addFolder('../datasets/Michael')

    trainer = Trainer()
    trainer.train(featMat)
    def test(self, inputNodes, hiddenLayers, outputNodes, weightMatrices, doTraining=True):
        nw = NeuralNetwork(inputNodes, hiddenLayers, outputNodes, weightMatrices)

        bp = BackPropagation(nw)

        trainer = Trainer(nw, bp)

        if doTraining:
            trainer.train(self.inputsTraining, self.targetTraining, self.iterations, self.N)
            numpy.save("weights", trainer.backPropagation.neuralNetwork.weights)
        else:
            trainer.backPropagation.neuralNetwork.weights = numpy.load("weights.npy")

        self.evaluate(trainer, self.inputsEvaluting, self.targetEvaluting)
Example #8
    def init(self):
        self.train_data = None
        eval_feats      = None
        eval_annos      = None
        self.trainer    = Trainer()

        return self
def extendedPairs(keyword = 'iphone 7', onlyFollowings = False, AllPageButId = False):
    import paths, SparkLogFileHandler, SearchExtractor, FinalizedRunners, NewProductPreferrer, PythonVersionHandler, Trainer
    keyword_name = keyword.replace(' ', '_')
    outputFolder = paths.joinPath(paths.HDFSRootFolder, 'weekAugust')
    inputPath = paths.joinPath(outputFolder, keyword_name + '/' + keyword_name + '_extractedLogs')
    logs = FinalizedRunners.getPreparedLogsFromHDFS(inputPath, filtering = False)
    searchNProductLogs = SearchExtractor.searchNProductLogsForSingleKeyword(logs, keyword)
    pairs = NewProductPreferrer.trainingInstancesForSingleKeyword(searchNProductLogs, onlyFollowings = onlyFollowings, AllPageButId = AllPageButId)
    if pairs.isEmpty():
        return
    pairs = pairs.coalesce(24)
    outputPath, productsPath = getLabeledPairsAndProductsPath(outputFolder, keyword, onlyFollowings = onlyFollowings, AllPageButId = AllPageButId)
    SparkLogFileHandler.saveRDDToHDFS(pairs, outputPath)
    ids = pairs.flatMap(lambda i: i[0]).distinct()
    PythonVersionHandler.print_logging(ids.count(), 'ids have been gathered from the labeled pairs by', PythonVersionHandler.nowStr())
    productVectorFolder = paths.newProductVectorFolder3
    products = Trainer.getProducts(ids, productVectorFolder)
    Trainer.saveSpecificProduct(products, productsPath)
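# getLabeledPairsAndProductsPath is called above and in trainExtendedPairs but is not shown in
# this listing. The sketch below is only an assumption of its shape, reconstructed from the
# <keyword>_pairs / <keyword>_products naming used elsewhere in this file and from the
# onlyFollowings / AllPageButId flags it receives; the actual helper may differ.
def getLabeledPairsAndProductsPath(outputFolder, keyword, onlyFollowings = False, AllPageButId = False):
    import paths
    keyword_name = keyword.lower().replace(' ', '_')
    suffix = ''
    if onlyFollowings:
        suffix += '_onlyFollowings'
    if AllPageButId:
        suffix += '_allPageButId'
    keywordFolder = paths.joinPath(outputFolder, keyword_name)
    pairsPath = paths.joinPath(keywordFolder, keyword_name + suffix + '_pairs')
    productsPath = paths.joinPath(keywordFolder, keyword_name + suffix + '_products')
    return pairsPath, productsPath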
def trainForKeyword(keyword, folder = 'allWeek', saving = False):
    keyword = keyword.lower().replace(' ', '_')
    import paths, SparkLogFileHandler, FinalizedRunners, Trainer
    pairsPath = paths.joinPath(folder, keyword + '_pairs')
    outputPath = paths.joinPath(folder, keyword + '_products')
    if saving:
        productVectorFolder = paths.newProductVectorFolder
    else:
        productVectorFolder = outputPath
    return Trainer.train(pairsPath, productVectorFolder, outputPath, saving = saving, keyword = keyword)
def selectFeaturesForKeyword(keyword, threshold = 0.223):
    import Trainer, PythonVersionHandler
    from pyspark.mllib.regression import LabeledPoint
    featureList = Trainer.featuresList[:-2]
    Trainer.setFeatureVector(featureList)
    trainData, testData, weights, accuracy = getTrainedWeights(keyword)
    removedFeatures = []
    accuracies = [accuracy]
    weightsRow = list(weights)
    while (not isImportant(weights, threshold = threshold)) and len(weights) > 1:
        index, featureList, removedFeature = eliminate(weights, featureList)
        removedFeatures.append(removedFeature)
        Trainer.setFeatureVector(featureList)
        def getReducedVector(lp):
            newFeatures = list(lp.features)
            newFeatures.pop(index)
            return LabeledPoint(lp.label, newFeatures)
        trainData = trainData.map(getReducedVector)
        testData = testData.map(getReducedVector)
        model = Trainer.trainPairWiseData(trainData, dataName = 'TrainData')
        accuracy = Trainer.evaluateModelOnData(model, testData, dataName = 'TestData')
        accuracies.append(accuracy)
        weights = list(model.weights)
        weightsRow.append('X')
        weightsRow.extend(weights)
    PythonVersionHandler.print_('Keyword: ' + keyword)
    PythonVersionHandler.print_('Selected features: ' + str(featureList))
    PythonVersionHandler.print_('Following features have reduced by order: ' + str(removedFeatures))
    PythonVersionHandler.print_('Accuracies from each step: ' + str(accuracies))
    row = [keyword]
    row.extend(featureList)
    row.append('X')
    row.extend(removedFeatures)
    row.extend(accuracies)
    return row, weightsRow
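# selectFeaturesForKeyword relies on getTrainedWeights, isImportant and eliminate, none of which
# appear in this listing. The two sketches below are assumptions inferred from how the loop uses
# them: isImportant checks that every learned weight clears the magnitude threshold, and eliminate
# drops the feature whose weight is weakest. getTrainedWeights (returning trainData, testData,
# weights, accuracy) is left out because its data source is not visible here.
def isImportant(weights, threshold = 0.223):
    # True once every |weight| is at or above the threshold, so elimination can stop.
    return all(abs(w) >= threshold for w in weights)

def eliminate(weights, featureList):
    # Remove the feature with the smallest-magnitude weight and report what was dropped.
    index = min(range(len(weights)), key=lambda i: abs(weights[i]))
    removedFeature = featureList[index]
    newFeatureList = featureList[:index] + featureList[index + 1:]
    return index, newFeatureList, removedFeature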
def trainExtendedPairsLoop(onlyFollowings = False, AllPageButId = False):
    import paths, PythonVersionHandler, Trainer, ReadyTests
    feature_names = ['photos', 'soldCount', 'feedbackPercentage', 'memberSoldCount', 'memberSegment', 
                     'subtitleFlag', 'brandNew', 'freeCargo', 'dailyOffer', 'windowOptionFlag', 'sameDay']
    Trainer.setFeatureVector(feature_names)
    keywords = ReadyTests.get27Keywords()
    for c, keyword in enumerate(keywords): 
        PythonVersionHandler.print_logging(str(c+1)+'.', keyword.upper() + ':')
        trainExtendedPairs(keyword, onlyFollowings = onlyFollowings, AllPageButId = AllPageButId)
    Trainer.saveOutputTable()
    Trainer.printOutputTable()
def trainingTestAllLoop(feature_names):
    import paths, PythonVersionHandler, FinalizedRunners, Trainer, ReadyTests
    Trainer.setFeatureVector(feature_names)
    keywords = ReadyTests.get27Keywords()
    for c, keyword in enumerate(keywords): 
        PythonVersionHandler.print_logging(str(c+1)+'.', keyword.upper() + ':')
        keyword = keyword.replace(' ', '_')
        folder = paths.joinPath(paths.joinPath(paths.HDFSRootFolder, 'weekAugust'), keyword)
        FinalizedRunners.trainForKeyword(keyword, folder, saving = False)
    Trainer.printOutputTable()
    Trainer.saveOutputTable()
    Trainer.outputTable = []
from Trainer import *
from TrainerOptions import *

opt = TrainerOptions()
opt.parse_args()

trainer = Trainer(opt)
trainer.train()
def trainExtendedPairs(keyword = 'iphone 7', onlyFollowings = False, AllPageButId = False):
    import Trainer, paths
    outputFolder = paths.joinPath(paths.HDFSRootFolder, 'weekAugust')
    pairsPath, productsPath = getLabeledPairsAndProductsPath(outputFolder, keyword, onlyFollowings = onlyFollowings, AllPageButId = AllPageButId)
    productVectorFolder = paths.newProductVectorFolder3
    Trainer.train(pairsPath, productVectorFolder, keyword = keyword)
Example #16
kitti_dir = "./KITTI"
data_depth_annotated_dir = "./KITTI/data_dir/train/data_depth_annotated_train.txt"
data_depth_velodyne_dir = "./KITTI//data_dir/train/data_depth_velodyne_train.txt"
data_RGB_dir = "./KITTI/data_dir/data_RGB_train.txt"
size = (512, 512) #input and output size

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

trainset = KITTI_DepthCompletion(kitti_dir, data_depth_annotated_dir, data_depth_velodyne_dir, data_RGB_dir, size, test=False)
dataloader = torch.utils.data.DataLoader(
                trainset, batch_size=batch_size,
                shuffle=True, num_workers=4
             )

trainer = Trainer(device)
writer = SummaryWriter()

def log_img(image, semantic, it):
    input_img = vutils.make_grid(image)
    input_array = input_img.to('cpu').numpy()

    semantic = F.softmax(semantic, dim=1)
    semantic = torch.argmax(semantic, dim=1, keepdim=True)
    semantic = vutils.make_grid(semantic)
    semantic = semantic.permute(1, 2, 0)
    semantic_array = semantic.to('cpu').numpy()[:,:,0]
    semantic_img = utils.color_semantic_label(semantic_array)
    semantic_img = np.transpose(semantic_img, (2, 0, 1))

    writer.add_image('input', input_array, it)
    writer.add_image('semantic', semantic_img, it)
Example #17
# Reformat data into CSV
#
# @author Luke Munro

import Trainer, csv
import DeepNN as NN
import sys as SYS

Ash = Trainer.Trainer(24, 3, NN.NNet(24, 3))
f = open("move_record3#{0}.csv".format(SYS.argv[1]), "wt")
writer = csv.writer(f)
raw_data = Ash.data_from_record(SYS.argv[1])
for pair in raw_data:
    old_state = pair[0]
    new_state = pair[1]
    move = Ash.get_training_move(old_state, new_state).reshape(1, 24).tolist()[0]
    old_state.append(move.index(1))
    print(move)
    writer.writerow(old_state)
f.close()
Example #18
def Agent():
    #X = input matrix
    #X = np.array(([[3,5], [5,1], [10,2]]), dtype=float)
    #y = desired output
    #y = np.array([[75], [82], [93]], dtype=float)
    print("inside agent")
    #initializing neural network
    nn = Neural_Network()
    #initializing Trainer
    T = tr.trainer(nn)
    #training data
    train_X = read_dataset_from_csv('F:\\Projects\\FYP\\ANN\\training_set.csv')
    train_y = read_dataset_class_from_csv('F:\\Projects\\FYP\\ANN\\training_set.csv')
    X = np.array((train_X), dtype=float)
    y = np.array((train_y), dtype=float)
    np.transpose(X)
    np.transpose(y)
    
    
    #Normalization
    X = X/np.amax(X, axis=0)
    y = y/100
    
    
    
    #validation data
    val_X = read_dataset_from_csv('F:\\Projects\\FYP\\ANN\\validation_set.csv')
    val_y = read_dataset_class_from_csv('F:\\Projects\\FYP\\ANN\\validation_set.csv')
    vx = np.array((val_X), dtype=float)
    vy = np.array((val_y), dtype=float)
    np.transpose(vx)
    np.transpose(vy)
    
    vx = vx/np.amax(vx, axis=0)
    vy = vy/100
    
    
    
    T.train(X,y, vx, vy)
    #yhat = nn.feed_forward(vx)
    
    
    
    test_X = read_dataset_from_csv('F:\\Projects\\FYP\\ANN\\test_set.csv')
    #test_y = read_dataset_class_from_csv('F:\\Projects\\FYP\\ANN\\test_set.csv')
    tx = np.array((test_X), dtype=float)
    #ty = np.array((test_y), dtype=float)
    np.transpose(tx)
    #np.transpose(ty)
    
    tx = tx/np.amax(tx, axis=0)
    #ty = ty/100
    
    nx = np.array(([[90.9, 75.4, 5, 0]]), dtype=float)
    np.transpose(nx)
    nx = nx/np.amax(nx, axis=0)

    #T.train(vx, vy, tx, ty)
    that = nn.feed_forward(tx)
    #yhat = nn.feed_forward(nx)
    print(that, " :that")
    return that
# Demo 1, Train a classifier

import Trainer
import Util


Trainer.trainCascadeClassifier(Util.DEFAULT_MAX_FPR,
                               Util.DEFAULT_MIN_DR,
                               Util.DEFAULT_TARGET_FPR,
                               4900, Util.DEFAULT_FACE_IMAGE_PATH_PREFIX,
                               7900, Util.DEFAULT_NON_FACE_IMAGE_PATH_PREFIX,
                               Util.DEFAULT_PARTITION_PERCENTAGE,
                               Util.DEFAULT_JSON_FILE)
Example #20
if __name__ == '__main__':

    if len(sys.argv) < 4:
        print("Error: missing files")
        exit(-1)

    classes = 3

    training_data = numpy.genfromtxt(sys.argv[1], delimiter=',', dtype="|U5")
    training_labels = numpy.genfromtxt(sys.argv[2], delimiter=',')
    test_data = numpy.genfromtxt(sys.argv[3], delimiter=',', dtype="|U5")

    training_data, test_data = Preparations.Preparations(
        training_data, test_data).prepare(1)

    perceptron_weights, svm_weights, pa_weights = Trainer.Trainer(
        training_data, training_labels, classes).train_all_simul()

    tester = Tester.Tester(test_data, perceptron_weights, svm_weights,
                           pa_weights)

    tester.test()

    if len(sys.argv) == 5 and True:  # debug mode
        perceptron_success_rate, svm_success_rate, pa_success_rate = tester.calculate_statistics(
            numpy.genfromtxt(sys.argv[4], delimiter=','))
        print("succeeds rate: per: {}, svm:{}, pa: {}".format(
            perceptron_success_rate, svm_success_rate, pa_success_rate))
        Grapher.Grapher(training_data, training_labels, test_data,
                        numpy.genfromtxt(sys.argv[4]),
                        classes).perceptron_graph()
    else:  # testing mode
Example #21
 def processFace(self):
     Trainer.exe()
     pass
Example #22
import gym
import numpy as np
from common import calc_returns
from Agents.Networks import ReinforceNetwork
from Agents.Networks import ActorCriticNetwork
from Agents.ReinforceAgent import ReinforceAgent
from Agents.ActorCriticAgent import ActorCriticAgent
import Trainer as trn

env = gym.make('CartPole-v0')
env._max_episode_steps = 1000
obs = env.reset()

#Agents
acNetwork = ActorCriticNetwork(4, 2)
acAgent = ActorCriticAgent(acNetwork)

reinforceNetwork = ReinforceNetwork(4, 2)
reinforceAgent = ReinforceAgent(reinforceNetwork)

trnOpts = trn.TrainOpts()
trnOpts.n_epochs = 100
trnOpts.n_episodes = 100
trainer = trn.Trainer(agent=reinforceAgent, env=env, opts=trnOpts)
trainer.train()
trainer.test()
reinforceAgent.save_model("ReinforceAgent_1")


#!/usr/bin/env python

import Trainer
import datetime
import argparse
import pybulletgym.envs

trainer = Trainer.Trainer()

argparser = argparse.ArgumentParser()
Trainer.add_opts(argparser)

# precoded options
opts = argparser.parse_args()
opts.agent = "KerasDDPGAgent-v0"
opts.env = "PybulletInvertedDoublePendulum-v0"
opts.train_for = 10000000
opts.test_for = 0
datenow = '{:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
opts.save_file = "checkpoints/%s-%s-%s.h5" % (opts.agent, opts.env, datenow)

print("\n OPTS", opts)

trainer.setup_exercise(opts)
Example #24
import RPi.GPIO as GPIO
import os, math

import time

import random

from threading import Thread

from button import Button, BUTTON_PRESSED, BUTTON_DOUBLECLICKED

import kociemba

import Trainer as train

train.init()

#_______________Constants________________

GPIO.setmode(GPIO.BOARD)

PLUS_BUTTON = 11  # adjust to the chosen GPIO pins
MINUS_BUTTON = 13  # adjust to the chosen GPIO pins
ENTER_BUTTON = 15  # adjust to the chosen GPIO pins

LINKS_DREH = 36  # adjust to the chosen GPIO pins
LINKS_GRIP = 37  # adjust to the chosen GPIO pins
RECHTS_DREH = 18  # adjust to the chosen GPIO pins
RECHTS_GRIP = 16  # adjust to the chosen GPIO pins

C180 = 0  # 1 = 180° rotation (requires at least a 270° servo); otherwise = 90° (default)
Example #25
    train_x, train_y = shuffle(train_x, train_y)

    validation_size = int(train_x.shape[0] * 0.2)
    validation_x, validation_y = train_x[-validation_size:, :], train_y[-validation_size:]
    train_x, train_y = train_x[:-validation_size, :], train_y[:-validation_size]
    train_x = train_x / 255.0
    validation_x = validation_x / 255.0
    test_x = test_x / 255.0
    return train_x, train_y, validation_x, validation_y, test_x

def load_data_without_validation(train_data, train_class, test_data):
    train_x = numpy.loadtxt(train_data)
    train_y = numpy.loadtxt(train_class)
    test_x = numpy.loadtxt(test_data)
    train_x, train_y = shuffle(train_x, train_y)
    return train_x, train_y, test_x


if __name__ == '__main__' :
    # train_x, train_y, validation_x, validation_y, test_x = load_data_with_validation("train_x", "train_y", "test_x")

    train_x, train_y, test_x = load_data_without_validation("train_x", "train_y", "test_x")

    trainer = Trainer.Trainer(train_x, train_y)

    trainer.train_without_validation()

    # trainer.train_with_validation(validation_x, validation_y)

    trainer.write_test_y(test_x)
import os
import numpy as np
import torch
import torch.nn as nn

import pyro
from pyro.distributions import Normal
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

from Models import *
from Trainer import *
from DataGenerator import *

Trainer = Trainer()

#--------------------------------------------------------------------------------


def main(nnClassCount=nclasses):
    # "Define Architectures and run one by one"

    nnArchitectureList = [{
        'name': 'BN1',
        'model': BayesianMLP(),
        'ckpt': None
    }]

    for nnArchitecture in nnArchitectureList:
        runTrain(nnArchitecture=nnArchitecture)
Example #27
if not os.path.exists(out):
    os.mkdir(out)

## Load cuda for training with GPU ##
cuda = torch.cuda.is_available()
torch.manual_seed(1)
if cuda:
    torch.cuda.manual_seed(1)
    model = model.cuda()

trainer = Trainer.Regression_trainer(cuda=cuda,
                             model=model,
                             optimizer=optimizer,
                             train_loader=Train_loader,
                             test_loader=Valid_loader,
                             train_root_dir=None,
                             out=out,
                             output_model=None,
                             test_data=Valid_loader,
                             max_epoch=epoch_num,
                             batch_size=batch_size)

print("==start training==")


start_iteration = 0
start_epoch = 0

trainer.epoch = start_epoch
trainer.iteration = start_iteration
trainer.train_epoch()
Example #28
 def SetNetwork(self, nn, trainingSet):
     self.nn = nn
     self.trainer = Trainer.Trainer(nn, trainingSet, 2.0,
                                    lambda r: r * 0.95, 1000, 2, 1)
Example #29
parser.add_argument("--l-rate", type=float, default=0.0002, help="Learning rate for the network")
parser.add_argument("--use-gpu", action="store_true", default=True, help="Set to true to use gpu")
parser.add_argument("--tau", type=float, default=0.005, help="Used to update target network with polyak update")
parser.add_argument("--save-path", type=str, default="duckie_models/navigation", help="Path to save the model")
parser.add_argument("--save-freq", type=int, default=10000, help="Number of iterations to save the model")
parser.add_argument("--checkpoint-path", type=str, default="duckie_models/navigation/20210626-112148/multihead", help="Path to checkpoint. Use it to resume training")
parser.add_argument("--entropy-scale", type=float, default=0.2, help="Entropy scale used in loss functions")
parser.add_argument("--encoder-path", type=str, default="encoder_model/last", help="Path to the saved encoder model")

args = parser.parse_args()

opts = SACAgentOptions()

opts.exp_buffer_size =args.replay_buffer_size
opts.learning_rate = args.l_rate
opts.exp_batch_size = args.batch_size
opts.tau = args.tau
opts.use_gpu = args.use_gpu
opts.clustering = False
opts.save_frequency = args.save_freq
opts.render = True
opts.entropy_scale = args.entropy_scale

train_opts = trn.TrainOpts()
train_opts.n_epochs = 1
train_opts.n_episodes = args.n_episodes
train_opts.n_iterations = args.n_iterations # run for 100k iterations
train_opts.save_path = args.save_path
train_opts.checkpoint = args.checkpoint_path

train(args, opts, train_opts)
Example #30
                    help='[net] weight decay')
parser.add_argument('--lr',
                    default=0.1,
                    type=float,
                    help='[net] learning rate')
parser.add_argument('--ep', default=100, type=int, help='[net] epoch')
parser.add_argument('--beta',
                    default=0.3,
                    type=float,
                    help='[net] hyperparameter for pre-class loss weight')
parser.add_argument('--pmp',
                    default=pmp,
                    type=str,
                    help='[net] pre-trained model path')
args = parser.parse_args()

# Read (if it exists) or generate data for training
h5_fn = './data/%s_%d_%d_%d_%d.h5' % (args.dn, args.sr, args.ws, args.hs,
                                      args.mel)
h5_fn = '/home/fearofchou/%s_%d_%d_%d_%d.h5' % (args.dn, args.sr, args.ws,
                                                args.hs, args.mel)
data = get_h5_dataset(h5_fn, training_set_csv_fn, test_set_csv_fn,
                      training_set_wav_fp, test_set_wav_fp, classes_fn, args)

# build model
model = nn.DataParallel(Net(args.mel, data['Ytr'].shape[1]).cuda())

# Train
Trer = Trainer(data, model, args)
Trer.fit()
Example #31
                    weight_decay=args.weight_decay)

start_epoch = args.start_epoch
if args.resume_dir:
    training_aux = Utility.Training_aux(args.resume_dir)
    start_epoch, best_prec1 = training_aux.load_checkpoint(model, optimizer, is_best = True)

print('The time of init model, criterion and optimizer:{0}\t'.format(Utility.format_time(time.time() - begin)))
#----------------------------------------
# creating trainer
#----------------------------------------
begin = time.time()
trainer = Trainer.Trainer(train_loader = dataloaders['train'], val_loader = dataloaders['val'],
                        model = model, criterion = criterion, optimizer = optimizer, 
                        nEpoch = args.nEpoch, lr_base = args.lr_base, lr_end=args.lr_end, lr_decay_method = args.lr_decay_method, 
                        is_soft_regu = args.is_soft_regu, is_SRIP = args.is_SRIP, soft_lambda = args.soft_lambda, 
                        svb_flag = args.svb_flag, iter_svb_flag = args.iter_svb_flag, svb_factor = args.svb_factor, 
                        bbn_flag = args.bbn_flag, bbn_factor = args.bbn_factor, bbn_type = args.bbn_type,
                        fsave = args.save, print_freq = args.print_freq)

print('The time of init trainer:\t{0}'.format(Utility.format_time(time.time() - begin)))
#----------------------------------------
# main
#---------------------------------------- 
def main():
    global args, start_epoch, best_prec1
    if args.is_evaluate == True:
        begin = time.time()
        print('Evaluating on testing set:')
        trainer.validate(0)
        print('==> The time of evaluating:\t{0}'.format(Utility.format_time(time.time() - begin)))
Example #32
def Use_Dicts(global_variables,demand_variables):
	for i in range(len(global_variables)):
		for j in range(len(demand_variables)):
			reload_all()

			#%matplotlib qt5

			# RE DEFINE PATH TO THE RESULTS FOLDER
			path = 'C:/Users/danie/Dropbox/BeerGame/'
			TS = global_variables[i]['TS']
			Mu = global_variables[i]['mu']
			Sigma = global_variables[i]['sigma']
			constant_ld = global_variables[i]['ltavg']
			periods = 40
			'''
			old way of using all demand types
			#you must choose the demand type and the possible actions
			#then run it with the variations of the other variables using the dictionary created
			'''
			demand_type = global_variables[i]['demand_type']
			if demand_type == "Seasonal":
				demand = Demand.Seasonal_Demand(15, 5, 0, 1.5, 0, Mu - 2, Sigma)
			elif demand_type == "Growing":
				demand = Demand.Growing_Demand(0,(2*Mu/periods), 0, Sigma)
			elif demand_type == "Sporadic":
				demand = Demand.Sporadic_Demand(Mu,0.2,5)
				#demand.generate(periods)
				#bench_agent = Agent.BS_Agent_Gauss(1, Sigma, TS, Mu)
			elif demand_type == "Gaussian":
				demand= Demand.Gaussian_Demand(Mu, Sigma, min_value = 0, max_value = 100)
				#demand = Demand.Gaussian_Demand(global_variables[i]['Mu'],global_variables[i]['Sigma'],global_variables[i]['Min'],global_variables[i]['Max'])
			elif demand_type =="Uniform":
				demand = Demand.Uniform_Demand(Mu ,Mu,Step = 1)
			elif demand_type == "Growingseasonal":
				demand = Demand.Growing_Seasonal_Demand(1,[Mu*0.5,Mu* 0.8,Mu*0.7,Mu*0.9,Mu,Mu,Mu * 0.9,Mu*1.2,Mu,Mu*1.1,Mu*1.5,Mu*2], Sigma)
			elif demand_type == "Mixedseasonal":
				demand = Demand.Mixed_Saisonnalities_Demand(Mu, [1,1,2,2,2,3,4,4,2,1,1,4],[0.6,0.8,0.7,0.9], Sigma)
			elif demand_type == "Growthstable": 
				demand = Demand.Growth_Stable_Demand(0, 1, Mu + 5, Sigma)
			else:
				print("Did not recognize demand type")
				break
			bench_agent = demand.bench_agent(global_variables[i]['pos'],global_variables[i]['TS'],periods)
			game_params = {
			    'client_demand':demand,
			    'lead_times':[ld.Constant_LeadTime(global_variables[i]['lt'][0]), ld.Constant_LeadTime(global_variables[i]['lt'][1]), 
			              ld.Constant_LeadTime(global_variables[i]['lt'][2]), ld.Constant_LeadTime(global_variables[i]['lt'][3])],
			    'AI_possible_actions': np.arange(-10,10),
			    'm' : global_variables[i]['m'],
			    'shortage_cost':get_optimal_gaussian_SC(TS, Mu = Mu, Sigma= Sigma, lead_time=constant_ld),
			    'TS' : TS,
			    'holding_cost':1,
			    'initial_inventory':constant_ld * Mu + 2* Sigma,
			    'number_periods':periods,
			    'use_backorders':0,
			    'state_features':["IL" ,"d", "BO", "RS", "OO","t"],
			    'AI_DN':[10,10],   # Not implemented yet
			    'comparison_agent' : bench_agent
			}
			'''
						Alternatives to above to be more flexible
							{
						    'client_demand': demand,
						    'lead_times':(global_variables[i]['leadtimes'], global_variables[i]['leadtimes'], 
						              global_variables[i]['leadtimes'], global_variables[i]['leadtimes']),
						    'initial_inventory':global_variables[i]['leadtimes'].Mean*10,
							}
							Need to make changes to functions that creates the dictionary list
							'''
			list_agents = ['BS20','BS20', 'BS20' , 'BS20']
			list_agents[global_variables[i]['pos']] = 'DQN'
			agents = generate_agents(list_agents, game_params)
			trainer = Tr.Trainer(agents, game_params)
			comparator = trainer.generate_comparator(min_BS_level = 5, max_BS_level = 20)

			trainer.train2(400)

			AI_Agent = trainer.best_AI_agent #.get_AI_agent()
			AI_Agent.label = 'best DQN'

			comparator.update_AI_Agents([trainer.best_AI_agent, trainer.get_AI_agent()])
			comparator.launch_comparison()

			comparator.histograms()
			comparator.one_game_results([trainer.get_AI_agent()])

			importlib.reload(Saver)
			saver = Saver.Saver(path)
			saver.clean_results_folder()
			saver.save(trainer)
Example #33
def main():
    rebuild_vocab = False
    if rebuild_vocab:
        trainfile = '/D/home/lili/mnt/DATA/convaws/convdata/conv-test_v.json'
        train = pd.read_json(trainfile)
        print('Read training data from: {}'.format(trainfile))

        valfile = '/D/home/lili/mnt/DATA/convaws/convdata/conv-val_v.json'
        val = pd.read_json(valfile)
        print('Read validation data from: {}'.format(valfile))
        train_srs = train.context.values.tolist()
        train_tgt = train.replies.values.tolist()
        val_srs = val.context.values.tolist()
        val_tgt = val.replies.values.tolist()
        src_vocab, _ = hierdata.buildvocab(train_srs + val_srs)
        tgt_vocab, tgtwords = hierdata.buildvocab(train_tgt + val_tgt)

    else:
        print('load vocab from pt file')
        dicts = torch.load('test_vocabs.pt')
        #tgt = pd.read_json('./tgt.json')
        #src = pd.read_json('./src.json')
        src_vocab = dicts['src_word2id']
        tgt_vocab = dicts['tgt_word2id']
        tgtwords = dicts['tgt_id2word']
        print('source vocab size: {}'.format(len(src_vocab)))
        print('source vocab test, bill: {} , {}'.format(
            src_vocab['<pad>'], src_vocab['bill']))
        print('target vocab size: {}'.format(len(tgt_vocab)))
        print('target vocab test, bill: {}, {}'.format(tgt_vocab['<pad>'],
                                                       tgt_vocab['bill']))
        print('target vocab testing:')
        print('word: <pad> get :{}'.format(tgtwords[tgt_vocab['<pad>']]))
        print('word: bill get :{}'.format(tgtwords[tgt_vocab['bill']]))
        print('word: service get :{}'.format(tgtwords[tgt_vocab['service']]))

    parser = argparse.ArgumentParser(description='train.py')

    # opts.py
    opts.add_md_help_argument(parser)
    opts.model_opts(parser)
    opts.train_opts(parser)
    opt = parser.parse_args()

    dummy_opt = parser.parse_known_args([])[0]

    opt.cuda = opt.gpuid[0] > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid[0])

    checkpoint = opt.model
    print('Building model...')
    model = ModelHVAE.make_base_model(
        opt, src_vocab, tgt_vocab, opt.cuda, checkpoint
    )  ### Done  #### How to integrate the two embedding layers...
    print(model)
    tally_parameters(model)  ### Done

    testfile = '/D/home/lili/mnt/DATA/convaws/convdata/conv-val_v.json'
    test = pd.read_json(testfile)
    print('Test training data from: {}'.format(testfile))

    test_srs = test.context.values.tolist()
    test_tgt = test.replies.values.tolist()

    test_batch_size = 16
    test_iter = data_util.gen_minibatch(test_srs, test_tgt, test_batch_size,
                                        src_vocab, tgt_vocab)

    tgtvocab = tgt_vocab

    optim = Optim.Optim('adam', 1e-3, 5)
    train_loss = Loss.VAELoss(model.generator, tgtvocab)
    valid_loss = Loss.VAELoss(model.generator, tgtvocab)
    trainer = Trainer.VaeTrainer(model, test_iter, test_iter, train_loss,
                                 valid_loss, optim)
    valid_stats = trainer.validate()
    print('Validation perplexity: %g' % valid_stats.ppl())
    print('Validation accuracy: %g' % valid_stats.accuracy())
train_loader, test_loader = get_imagenet_dataset(imagenet_path, 64, 8)

criterion = nn.CrossEntropyLoss()
criterion.cuda()
init_lr = 0.01

optimizer = torch.optim.SGD(model.parameters(),
                            lr=init_lr,
                            momentum=0.9,
                            weight_decay=1e-4)


def lr_sched(optimizer, epoch):
    lr = init_lr * (0.1**(epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


Trainer.start(model=model,
              optimizer=optimizer,
              train_dataloader=train_loader,
              test_dataloader=test_loader,
              criterion=criterion,
              max_epoch=61,
              lr_sched=lr_sched,
              display_freq=50,
              output_dir=TAG,
              save_every=1,
              max_keep=50)
Example #35
# 	elif type(n) is note.Rest:
# 		good = note.Rest()
# 		good.quarterLength = n.quarterLength
# 		return good


# def add_fifths_intervals(notes_and_rests):
# 	ind_of_last_note = -1
# 	arr = []
# 	for i in range(len(notes_and_rests)):
# 		if ind_of_last_note == -1 and type(notes_and_rests[i]) == note.Note:
# 			ind_of_last_note = i
# 		elif type(notes_and_rests[i]) == note.Note:
# 			arr += [get_fifths_distance(notes_and_rests[ind_of_last_note], notes_and_rests[i])]
# 			ind_of_last_note = i
# 	return arr

# s = midi_to_stream('the_lady_is_a_tramp.midi')
# sp = ScoreParser(s)
# ideas = sp.get_solo_ideas_from_measure(sp.score[sp.solo_parts[55]][55])
# idea = ideas[0]

# converted = convert_idea(idea)





from Trainer import *
t = Trainer()
t.get_training_set()
Example #36
    print "Writing training set"
    with open(ptrain, "wb") as f:
        pickle.dump(train, f)

if os.path.exists(ptest):
    print "Reading test set"
    with open(ptest, "rb") as f:
        test = pickle.load(f)
else:
    testset, testlabels = DatasetLoader.LoadRandomizedDataset(
        testset_folder, 2000, True)
    test = (testset, testlabels)

    print "Writing test set"
    with open(ptest, "wb") as f:
        pickle.dump(test, f)

print "Training and test sets ready"

trainer = Trainer.Trainer(network)
#print network.Test(*test)
cost = MeanSquareCost.MeanSquareCost()

print "Training Set Shape: " + str(train[0][0].shape)
print "Training the network"
trainer.SGD(train[0], train[1], cost, 0.05, 100, 10, test, 0.87)
print "Saving the network"
with open(pnetwork, "wb") as f:
    pickle.dump(network, f)

print "Success rate: " + str(network.Test(*test) * 100) + "%"
Example #37
 def __init__(self, detect_que, event_que, args):
     self.detect_que = detect_que
     self.event_que = event_que
     self.args = args
     self.is_stop = Manager().Value('i', True)
     self.net = Trainer(self.args)
Example #38

for i in inNode:
    # normal training (3 inputs)
    t = Trainer.Trainer(True)
    t.TrainSetup("./learned_data.csv", "100node_{}".format(i), RNN_model1.RNN_model([100,100], [i,1]), thin=50, target_epoch=1000, Interrupt=100, Norm=[100, 100, 100], TestInputFile="./unknow_data.csv")
    t.Train()

for i in inNode:
    # normal training (3 inputs)
    t = Trainer.Trainer(True)
    t.TrainSetup("./learned_data.csv", "10node_{}".format(i), RNN_model1.RNN_model([10,10], [i,1]), thin=50, target_epoch=1000, Interrupt=100, Norm=[100, 100, 100], TestInputFile="./unknow_data.csv")
    t.Train()
'''

t = Trainer.Trainer(False)
model = RNN_model1.RNN_model([50, 50], [3, 1])
serializers.load_npz(
    "./defore_dataset_learned/Linear_1_LSTM_1_Linear_1/50node_3/50node_3.model",
    model)
t.TestSetup("./log/unknow_data.csv", model, thin=1, Norm=[100, 100, 100])
TestSpeedDataSize = t._TestSpeedData.size
print(TestSpeedDataSize)
start1 = time.time()
t.Test()
elapsed_time1 = time.time() - start1
print("elapsed_time:{0}".format(elapsed_time1) + "[sec]")
start2 = time.time()
t.TestSP()
elapsed_time2 = time.time() - start2
print("elapsed_time:{0}".format(elapsed_time2) + "[sec]")
Example #39
class Evaluator:
    name = ""

    train_data = None
    eval_feats = None
    eval_annos = None

    trainer    = None
    classifier = None

    def __init__(self, name):
        self.name = name
        self.init()

    def init(self):
        self.train_data = None
        eval_feats      = None
        eval_annos      = None
        self.trainer    = Trainer()

        return self

    def log(self, message, section=None):
        msg = ''
        if section:
            msg = '[%s|%s] ' % (self.name, section)
        elif self.name:
            msg = '[%s] ' % self.name
        else:
            msg = ''
        msg += message

        print(msg)

        global logf
        logf.write(msg + '\n')

    def train_on(self, feats, annotations):
        global TrainingData
        name = annotations.name
        if name in TrainingData:
            self.train_data = TrainingData[name]
        else:
            self.train_data = FeatMat()
            self.train_data.add(feats, annotations)
            TrainingData[name] = self.train_data
        return self

    def evaluate_on(self, feats, annotations):
        global TrainingData
        name = annotations.name
        if name in TrainingData:
            self.eval_data = TrainingData[name]
        else:
            self.eval_data = FeatMat()
            self.eval_data.add(feats, annotations)
            TrainingData[name] = self.eval_data

        self.eval_feats = feats
        self.eval_annos = annotations
        return self

    def train(self):
        global Models
        name = self.eval_data.name
        if name in Models:
            self.trainer = Models[name]
        else:
            Models[name] = self.trainer.train(self.train_data, persist=False)
        self.classifier = Classifier(self.trainer)
        return self

    def evaluate(self):
        def log(msg):
            self.log(msg, 'eval')

        print('')

        errs = []
        C = 0                    # Correctly estimated
        I = 0                    # Inconsistent annotations
        A = 0                    # in Agreement (annotations)
        C_A = 0                  # correctly estimated where annotations in agreement
        T = len(self.eval_annos) # Total

        for fname, fclassifs in self.eval_annos.data.items():

            # Get ground truth
            tru = np.median(fclassifs)
            tru_cls = 1 if tru > 0.5 else 0

            # Get prediction
            est = self.classifier.predictFeats(self.eval_feats[fname])
            est_cls = 1 if est > 0.5 else 0

            if tru_cls == est_cls:
                C += 1

            # If any inconsistent classifications
            if len(np.unique(fclassifs)) > 1:
                I += 1
            else:
                A += 1
                if tru_cls == est_cls:
                    C_A += 1

            # Add errors
            errs.append(abs(tru - est))

        A   = float(A)
        C_A = float(C_A)
        T   = float(T)

        global logf
        logf.write('\n')
        if self.eval_annos.n > 1:
            log('%d/%d (%.2f%%) annotations in agreement' % (A, T, A/T*100))
            log('%d/%d (%.2f%%) incorrect for annotations in agreement' % (A-C_A, A, (A-C_A)/A*100))

        log('%d/%d (%.2f%%) incorrect' % (T-C, T, (T-C)/T*100))

        l1err = float(np.linalg.norm(errs, 1)) # L1-error norm
        l2err = float(np.linalg.norm(errs, 2)) # L2-error norm
        log('L1-error: %.3f' % (l1err / len(errs)))
        log('L2-error: %.3f' % (l2err / len(errs)))

        return (T-C)/T*100

    def print_correlations(self, annotations):
        pairs = combinations(annotations, 2)
        Rs = []
        for anno1, anno2 in pairs:
            _, cls1 = zip(*sorted(anno1.data.items()))
            _, cls2 = zip(*sorted(anno2.data.items()))
            cls1 = [x[0] for x in cls1]
            cls2 = [x[0] for x in cls2]
            R, p = pearsonr(cls1, cls2)
            self.log('%s <-> %s: %f %g' % (anno1.name, anno2.name, R, p))
            Rs.append(R)
        self.log('Mean R: %f\n' % np.mean(Rs))

    def plot_PR(self, ofpath, label):
        y_true = self.eval_data.y
        y_score = self.classifier.clf.decision_function(self.eval_data.X)

        # Scale random values to span same range as y_score
        y_score_maxp = np.max(y_score)
        y_score_maxn = -np.min(y_score)
        y_score_span = y_score_maxp + y_score_maxn
        y_rand = np.random.rand(y_true.shape[0], 1) * y_score_span - y_score_maxn

        # Calculate PR (model)
        precision, recall, _ = precision_recall_curve(y_true, y_score)
        area = average_precision_score(y_true, y_score)

        x = np.linspace(0, 1, 200)
        y = np.interp(x, np.flipud(recall), np.flipud(precision))

        # Calculate PR (random)
        precision, recall, _ = precision_recall_curve(y_true, y_rand)
        area_rand = average_precision_score(y_true, y_rand)
        y_rand = np.interp(x, np.flipud(recall), np.flipud(precision))

        txtpath = 'PR.out'
        try:
            dat = np.genfromtxt(txtpath, delimiter='\t', names=True,
                                deletechars='', replace_space=False)
        except:
            dat = np.array(x, dtype=[('Recall', float)])

        # Add model PR
        dat = append_fields(dat, '%s (area: %.2f)' % (label, area), y)

        # Add random PR
        dat = append_fields(dat, 'Random classifier (area: %.2f)' % area_rand, y_rand)

        np.savetxt(txtpath, dat, delimiter='\t',
                   header='\t'.join(dat.dtype.names), comments='')

        if ofpath:
            title = 'Precision-Recall curve'
            ylabel = 'Precision'
            generate_graph(txtpath, ofpath, title, ylabel)
        return self

    def cleanup(self):
        return self.init()
Example #40
# Neural Net w/ TensorFlow
#
# @author Luke Munro
##

import sys
import tflearn
import Trainer
import utils as UTIL
import DeepNN as NN
from tflearn.data_utils import load_csv

file_num = sys.argv[1]
numMoves = 24
AI = NN.NNet(numMoves, 3, [10, numMoves])
trainer = Trainer.Trainer(24, 3, AI)

reader = tf.TextLineReader()  # FIGURE THIS OUT, HOW TO SPLIT X AND Y
all_data = trainer.data_from_record(file_num)
data, labels = tflearn.load_csv(all_data)
data = []
labels = []
for pair in all_data:
    data.append(pair[0])
    labels.append(trainer.get_training_move(pair[0], pair[1]))

net = tflearn.input_data(shape=[None, 24])
net = tflearn.fully_connected(net, 100)
net = tflearn.fully_connected(net, 24, activation='softmax')
net = tflearn.regression(net)
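# The listing cuts off after the regression layer. A minimal, assumed continuation with tflearn
# would wrap the graph in a DNN and fit it on the prepared data/labels; the epoch and batch
# settings here are illustrative, not taken from the original example.
model = tflearn.DNN(net)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)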
Example #41
class Evaluator:
    name = ""

    train_data = None
    eval_feats = None
    eval_annos = None

    trainer    = None
    classifier = None

    def __init__(self, name):
        self.name = name
        self.init()

    def init(self):
        self.train_data = None
        eval_feats      = None
        eval_annos      = None
        self.trainer    = Trainer()

        return self

    def log(self, message, section=None):
        msg = ''
        if section:
            msg = '[%s|%s] ' % (self.name, section)
        elif self.name:
            msg = '[%s] ' % self.name
        else:
            msg = ''
        msg += message

        print(msg)

        global logf
        logf.write(msg + '\n')

    def train_on(self, feats, annotations):
        global TrainingData
        name = annotations.name
        if name in TrainingData:
            self.train_data = TrainingData[name]
        else:
            self.train_data = FeatMat()
            self.train_data.add(feats, annotations)
            TrainingData[name] = self.train_data
        return self

    def evaluate_on(self, feats, annotations):
        global TrainingData
        name = annotations.name
        if name in TrainingData:
            self.eval_data = TrainingData[name]
        else:
            self.eval_data = FeatMat()
            self.eval_data.add(feats, annotations)
            TrainingData[name] = self.eval_data

        self.eval_feats = feats
        self.eval_annos = annotations
        return self

    def train(self):
        global Models
        name = self.eval_data.name
        if name in Models:
            self.trainer = Models[name]
        else:
            Models[name] = self.trainer
            self.trainer.train(self.train_data, persist=False)
            self.trainer.train_gist(self.train_data, persist=False)
        self.classifier = Classifier(self.trainer)
        return self

    def evaluate(self):
        def log(msg):
            self.log(msg, 'eval')

        print('')

        errs = []
        C = 0                    # Correctly estimated
        C_g = 0                  # - GIST + SVM
        C_r = 0                  # - random
        I = 0                    # Inconsistent annotations
        A = 0                    # in Agreement (annotations)
        C_A = 0                  # correctly estimated where annotations in agreement
        C_A_g = 0                # - GIST + SVM
        C_A_r = 0                # - random
        T = len(self.eval_annos) # Total

        for fname, fclassifs in self.eval_annos.data.items():

            # Get ground truth
            tru = np.median(fclassifs)
            tru_cls = 1 if tru > 0.5 else 0

            # Get prediction
            est = self.classifier.predictFeats(self.eval_feats[fname])
            est_gist = self.classifier.predictFeats_gist(self.eval_feats.gist(fname))
            est_rand = randint(0, 1)
            est_cls = 1 if est > 0.5 else 0

            if tru_cls == est_cls:
                C += 1
            if tru_cls == est_gist:
                C_g += 1
            if tru_cls == est_rand:
                C_r += 1

            # If any inconsistent classifications
            if len(np.unique(fclassifs)) > 1:
                I += 1
            else:
                A += 1
                if tru_cls == est_cls:
                    C_A += 1
                if tru_cls == est_gist:
                    C_A_g += 1
                if tru_cls == est_rand:
                    C_A_r += 1

            # Add errors
            errs.append(abs(tru - est))

        A   = float(A)
        C_A = float(C_A)
        T   = float(T)

        global logf
        logf.write('\n')
        if self.eval_annos.n > 1:
            log('%d/%d (%.2f%%) annotations in agreement' % (A, T, A/T*100))
            log('%d/%d (%.2f%%) incorrect for annotations in agreement' % (A-C_A, A, (A-C_A)/A*100))
            log('%d/%d (%.2f%%) incorrect for annotations in agreement (GIST)' % (A-C_A_g, A, (A-C_A_g)/A*100))
            log('%d/%d (%.2f%%) incorrect for annotations in agreement (RAND)' % (A-C_A_r, A, (A-C_A_r)/A*100))

        log('%d/%d (%.2f%%) incorrect' % (T-C, T, (T-C)/T*100))

        l1err = float(np.linalg.norm(errs, 1)) # L1-error norm
        l2err = float(np.linalg.norm(errs, 2)) # L2-error norm
        log('L1-error: %.3f' % (l1err / len(errs)))
        log('L2-error: %.3f' % (l2err / len(errs)))

        return (T-C)/T*100

    def print_correlations(self, annotations):
        pairs = combinations(annotations, 2)
        Rs = []
        for anno1, anno2 in pairs:
            _, cls1 = zip(*sorted(anno1.data.items()))
            _, cls2 = zip(*sorted(anno2.data.items()))
            cls1 = [x[0] for x in cls1]
            cls2 = [x[0] for x in cls2]
            R, p = pearsonr(cls1, cls2)
            self.log('%s <-> %s: %f %g' % (anno1.name, anno2.name, R, p))
            Rs.append(R)
        self.log('Mean R: %f\n' % np.mean(Rs))

    def plot_PR(self, ofpath, label):
        y_true = self.eval_data.y
        y_score = self.classifier.clf.decision_function(self.eval_data.X)

        # Scale random values to span same range as y_score
        y_score_maxp = np.max(y_score)
        y_score_maxn = -np.min(y_score)
        y_score_span = y_score_maxp + y_score_maxn
        y_rand = np.random.rand(y_true.shape[0], 1) * y_score_span - y_score_maxn

        # Calculate PR (model)
        precision, recall, _ = precision_recall_curve(y_true, y_score)
        area = average_precision_score(y_true, y_score)

        x = np.linspace(0, 1, 200)
        y = np.interp(x, np.flipud(recall), np.flipud(precision))

        # Calculate PR (random)
        precision, recall, _ = precision_recall_curve(y_true, y_rand)
        area_rand = average_precision_score(y_true, y_rand)
        y_rand = np.interp(x, np.flipud(recall), np.flipud(precision))

        txtpath = 'PR.out'
        try:
            dat = np.genfromtxt(txtpath, delimiter='\t', names=True,
                                deletechars='', replace_space=False)
        except:
            dat = np.array(x, dtype=[('Recall', float)])

        # Add model PR
        dat = append_fields(dat, '%s (area: %.2f)' % (label, area), y)

        # Add random PR
        dat = append_fields(dat, 'Random classifier (area: %.2f)' % area_rand, y_rand)

        np.savetxt(txtpath, dat, delimiter='\t',
                   header='\t'.join(dat.dtype.names), comments='')

        if ofpath:
            title = 'Precision-Recall curve'
            ylabel = 'Precision'
            generate_graph(txtpath, ofpath, title, ylabel)
        return self

    def cleanup(self):
        return self.init()
Example #42
}

# Data loading: training set and test set
foodDataLoader,testLoader = DataLoader.getFoodDataLoader(para['batch_size'])
print('train batches:'+str(len(foodDataLoader)))
print('test batches:'+str(len(testLoader)))

# Network: 75 classes, so the parameter is 75
Net = Networks.DenseNet(para['categoryNums'])

if para['useGPU'] == True:
    Net = Net.cuda()

# Use the SGD optimizer, passing in the network parameters and the learning rate lr
optimizer = torch.optim.SGD(Net.parameters(),lr=para['lr'])

# Loss function: cross entropy
lossFunc = torch.nn.CrossEntropyLoss()

# Trainer: pass in the optimizer, network, loss function, train loader, test loader, and the parameters
trainer = Trainer.Trainer(Net,optimizer,lossFunc,foodDataLoader,testLoader,para)

# Start training
trainer.train()

# Save
torch.save(Net,'final_model.pkl')

# Test
input('any key to start test')
trainer.test()
Example #43
import sys

import MergeAnsemble
import Trainer
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if (len(sys.argv) >= 2):
    # print(sys.argv[1])
    if sys.argv[1] == '--train' or sys.argv[1] == '-t':
        Trainer.train()
    elif sys.argv[1] == '--recommend' or sys.argv[1] == '-r':
        argv = [int(i) for i in sys.argv[2:]]
        MergeAnsemble.recomend(argv)
    elif sys.argv[1] == "--accuracy" or sys.argv[1] == '-a':
        MergeAnsemble.printAccuracy()
    else:
        print("option : train, t, recommend, r, accuracy, a")
else:
    print("option : train, t, recommend, r, accuracy, a")
Example #44
target = boston.target
features = boston.feature_names
s = StandardScaler()
data = s.fit_transform(data)

X_train, X_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    test_size=0.3,
                                                    random_state=80718)

# make target 2d array
y_train, y_test = to_2d_np(y_train), to_2d_np(y_test)
# print(y_train)
# print(X_train)

trainer = Trainer(dl, SGD(dl, lr=0.001))
print("WOrking")

trainer.fit(X_train,
            y_train,
            X_test,
            y_test,
            epochs=10,
            eval_every=10,
            seed=20190501)

# print()
eval_regression_model(dl, X_test, y_test)

# trainer = Trainer(linear_regression, SGD(linear_regression, lr=0.01))
from Pokemon import *
from Move import *
from Trainer import *
from Item import *

job = Trainer(
    "Job",
    Pokemon("Rayquaza", 50, 165, 139, 85, 139, 85, 90,
            SpecialMove("Dragon Pulse", 85, 10),
            SpecialMove("Air Slash", 75, 15), PhysicalMove("Outrage", 120, 10),
            SpecialMove("Dragon Ascent", 120, 5)), 3000)

poon = Trainer(
    "Poon",
    Pokemon("Deoxys", 50, 110, 139, 49, 139, 49, 139,
            SpecialMove("Psychic", 90, 10),
            SpecialMove("Psycho Boost", 140, 5),
            PhysicalMove("Zen Headbutt", 80, 15),
            SpecialMove("Hyper Beam", 150, 5)), 3000)
#Test Buy item
print(poon.buy_item(HPHealItem(
    "Potion",
    20,
), 1, 100))
print(poon.buy_item(HPHealItem(
    "Potion",
    20,
), 1, 100))
print("Poon has " + str(poon.items[0].amount_in_bag) + " " +
      poon.items[0].name)
print(poon.buy_item(PPHealItem("PP restore", 10), 3, 100))
def run(cfg):
    # net = SEInception3(num_classes=cfg["num_classes"])
    net = ResNet50(num_classes=cfg["num_classes"])
    print("use gpu:", used_gpu)
    print("use model:", net.name)
    if cfg["saved_model"]:
        print("*-------Begin Loading Saved Models!------*")
        net.load_pretrained_model('saved_models/' + cfg["saved_model"],
                                  skip=["fc.weight", "fc.bias"])

    if len(used_gpu) > 1 and cuda:
        distri = True
        net = torch.nn.DataParallel(net)
    else:
        distri = False
    print("loaded model:", 'saved_models/' + cfg["saved_model"])
    print("whether distributed:", distri)

    if cfg['optimizer'] == 'SGD':
        optimizer = SGD(filter(lambda p: p.requires_grad, net.parameters()),
                        lr=cfg['initial_learning_rate'],
                        momentum=cfg['momentum'],
                        weight_decay=cfg['weight_decay'])
    elif cfg['optimizer'] == 'Adam':
        optimizer = Adam(filter(lambda p: p.requires_grad, net.parameters()),
                         lr=cfg['initial_learning_rate'],
                         weight_decay=cfg['weight_decay'])

    if cfg["optimizer_path"]:
        print("*-----Begin Loading Saved optimizer!-----*")
        load_optimizer(optimizer, 'saved_models/' + cfg['optimizer_path'])

    loss = F.cross_entropy
    trainer = Trainer(net, optimizer, loss, cfg['batch_size'], distri)
    lr_step = MultiStepLR(optimizer, [2, 4, 6], gamma=0.5)
    # lr_step = ReduceLROnPlateau(optimizer, 'min', patience=3)

    print("*----------Begin Loading Data!-----------*")
    data_frame = extract_categories_df(cfg['train_bson_path'])
    train_dataset = CdiscountTrain(cfg['train_bson_path'],
                                   data_frame,
                                   train_mask,
                                   transform=train_augment)
    train_loader = DataLoader(train_dataset,
                              sampler=RandomSampler(train_dataset),
                              batch_size=cfg['batch_size'],
                              drop_last=True,
                              num_workers=cfg['data_worker'])

    valid_dataset = CdiscountVal(cfg['train_bson_path'],
                                 data_frame,
                                 val_mask,
                                 transform=valid_augment)
    valid_loader = DataLoader(valid_dataset,
                              sampler=SequentialSampler(valid_dataset),
                              batch_size=cfg['batch_size'],
                              drop_last=False,
                              num_workers=cfg['data_worker'])

    print("*------------Begin Training!-------------*")
    trainer.loop(train_loader, valid_loader, lr_step)
Example #47
# source_dirs = ['Covid-19', 'Normal', 'Pneumonia']
# utils.data_augmentation(workspace, train_dir, source_dirs, False)

train_set = datasets.ImageFolder(train_dir, transform)
val_set = datasets.ImageFolder(val_dir, transform)
train_size = len(train_set)
val_size = len(val_set)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=batch_size,
                                           shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=batch_size,
                                         shuffle=False)

model, epochs, train_accs, train_losses, val_accs, val_losses, train_f1_list, val_f1_list = trainer.train(
    model, train_loader, train_size, val_loader, val_size, device, criterion,
    optimizer, scheduler, num_epochs, workspace)

timestamp = str(datetime.datetime.now()).split('.')[0].split(' ')[0]
if os.path.isdir(os.path.join(workspace, 'checkpoints/best')) != True:
    os.mkdir(os.path.join(workspace, 'checkpoints/best'))
model_path = os.path.join(workspace, 'checkpoints/best', timestamp + '.pth')
torch.save(model.state_dict(), model_path)

utils.plot_loss_acc(timestamp, workspace, model_name, optimizer_name, epochs,
                    train_losses, val_losses, train_accs, val_accs)
utils.create_train_log(workspace, train_accs, train_losses, train_f1_list,
                       val_accs, val_losses, val_f1_list, model_name,
                       optimizer_name, criterion_name, lr, momentum, step_size,
                       gamma, num_epochs)
 def test_execute(self, mocked_params):
     ac = Trainer()
     ac.execute(params=mocked_params)
     assert not ac._params