def get_hierarchy(hierarchy, par="-", a={}, b={}, level=0):
    if hierarchy != None:
        rootNodes = hierarchy.findall("node")
        for node in rootNodes:
            parent = node.find("criterionID").text
            if not a.has_key(parent):
                a[parent] = Criterion(parent)
                a[parent].setParent(par)
                a[parent].level = level
            else:
                a[parent].setParent(par)
            get_hierarchy(node, parent, a, b, level + 1)
    return a
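A minimal usage sketch for get_hierarchy, assuming the nested <node>/<criterionID> XML layout it traverses; the Criterion stub below is hypothetical and matches only the attributes used above:

import xml.etree.ElementTree as ET

class Criterion:  # hypothetical stub with the attributes used above
    def __init__(self, name):
        self.name, self.parent, self.level = name, [], 0
    def setParent(self, p):
        self.parent.append(p)

doc = ET.fromstring(
    "<root><node><criterionID>cost</criterionID>"
    "<node><criterionID>price</criterionID></node></node></root>")
result = get_hierarchy(doc, a={}, b={})  # pass fresh dicts explicitly
print({k: (v.parent, v.level) for k, v in result.items()})
# {'cost': (['-'], 0), 'price': (['cost'], 1)}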
Example No. 2
def get_hierarchy(hierarchy, par='-', a={}, level=0):
    # Same traversal as above, without the unused extra accumulator.
    if hierarchy is not None:
        rootNodes = hierarchy.findall("node")
        for node in rootNodes:
            parent = node.find("criterionID").text
            if parent not in a:  # Python 3: 'in' instead of has_key()
                a[parent] = Criterion(parent)
                a[parent].setParent(par)
                a[parent].level = level
            else:
                a[parent].setParent(par)
            get_hierarchy(node, parent, a, level + 1)
    return a
def buildNewHierarchy(a, root, hierarchyArray, newParentName=None):
    # Rebuild the hierarchy so that a criterion with several parents is
    # duplicated once per parent under a disambiguated name.
    if newParentName is None:
        newParentName = root
    for criterion in hierarchyArray.values():
        if criterion.hasParent(root):
            if criterion.parentsNumber() > 1:
                for i in range(criterion.parentsNumber()):  # xrange is Python 2 only
                    newName = getNewCriterionName(criterion.name, newParentName)
                    a[newName] = Criterion(newName)
                    a[newName].level = hierarchyArray[criterion.name].level
                    a[newName].setParent(newParentName)
                    buildNewHierarchy(a, criterion.name, hierarchyArray, newName)
            else:
                a[criterion.name] = hierarchyArray[criterion.name]
                buildNewHierarchy(a, criterion.name, hierarchyArray)
    return a
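getNewCriterionName is not included in this listing. A hypothetical helper consistent with its call sites could look like the sketch below; it keeps a counter because buildNewHierarchy may call it repeatedly with the same arguments:

_name_counts = {}

def getNewCriterionName(name, parentName):
    # hypothetical: disambiguate a multi-parent criterion by its parent,
    # appending a counter so repeated calls yield distinct names
    key = (name, parentName)
    _name_counts[key] = _name_counts.get(key, 0) + 1
    return "{}_{}_{}".format(name, parentName, _name_counts[key])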
Example No. 4
def readCommand(argv):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)

    parser.add_option(
        '-i',
        '--input_bin',
        help=default('Path to the serialized criterion input'),
        default='CS_763_Deep_Learning_HW/input_criterion_sample_1.bin')
    parser.add_option('-t',
                      '--target_bin',
                      help=default('Path to the serialized targets'),
                      default='CS_763_Deep_Learning_HW/target_sample_1.bin',
                      type="string")
    parser.add_option(
        '-g',
        '--ig',
        help=default('Path to the serialized criterion gradients'),
        default='CS_763_Deep_Learning_HW/gradCriterionInput_sample_1.bin',
        type="string")

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}

    input_bin_path = options.input_bin
    target_bin_path = options.target_bin
    grad_input_bin_path = options.ig

    # torchfile reads Torch7-serialized tensors
    input_bin = torch.tensor(torchfile.load(input_bin_path)).double()
    target_bin = torch.tensor(torchfile.load(target_bin_path)).double()
    grad_input_bin = torch.tensor(torchfile.load(grad_input_bin_path)).double()

    # Torch7 labels are 1-based; shift them to 0-based for indexing.
    target_bin -= 1

    criterion = Criterion.Criterion()
    loss = criterion.forward(input_bin, target_bin)
    print("Loss is -----", loss)
    gradInput = criterion.backward(input_bin, target_bin)
def checkNewParetsName(item, array):
    a = {}
    for sth, crit in array.items():
        if crit.name == item.name:
            # 'times' was undefined in the original; one copy per parent is
            # presumably intended, matching divideCriteria below.
            for i in range(len(crit.parent)):
                new_name = getNewCriterionName(crit.name, crit.parent[i])
                a[new_name] = Criterion(new_name)
                a[new_name].setParent(crit.parent[i])
        else:
            a[sth] = array[sth]
    return a
def divideCriteria(item, array):
    a = {}
    for sth, crit in array.items():
        if sth == item:
            for parent in crit.parent:
                new_name = getNewCriterionName(item, parent)
                a[new_name] = Criterion(new_name)
                a[new_name].setParent(parent)
        else:
            a[sth] = array[sth]
    return a
Example No. 8
def create_matrix(self, data, label):
    # Greedy bottom-up aggregation: repeatedly merge the two label groups
    # with the smallest pairwise score, emitting one +1/-1/0 column per
    # merge (an ECOC-style coding matrix).
    index = {l: i for i, l in enumerate(np.unique(label))}
    matrix = None
    labels_to_agg = np.unique(label)
    labels_to_agg_list = [[x] for x in labels_to_agg]
    label_dict = {
        labels_to_agg[value]: value
        for value in range(labels_to_agg.shape[0])
    }
    num_of_length = len(labels_to_agg_list)
    class_1_variety = []
    class_2_variety = []
    while len(labels_to_agg_list) > 1:
        score_result = np.inf
        for i in range(0, len(labels_to_agg_list) - 1):
            for j in range(i + 1, len(labels_to_agg_list)):
                class_1_data, class_1_label = _get_data_subset(
                    data, label, labels_to_agg_list[i])
                class_2_data, class_2_label = _get_data_subset(
                    data, label, labels_to_agg_list[j])
                score = Criterion.agg_score(
                    class_1_data,
                    class_1_label,
                    class_2_data,
                    class_2_label,
                    score=Criterion.max_distance_score)
                if score < score_result:
                    score_result = score
                    class_1_variety = labels_to_agg_list[i]
                    class_2_variety = labels_to_agg_list[j]
        new_col = np.zeros((num_of_length, 1))
        for i in class_1_variety:
            new_col[label_dict[i]] = 1
        for i in class_2_variety:
            new_col[label_dict[i]] = -1
        if matrix is None:
            matrix = new_col
        else:
            matrix = np.hstack((matrix, new_col))
        # Merge the chosen pair and continue until one group remains.
        new_class = class_1_variety + class_2_variety
        labels_to_agg_list.remove(class_1_variety)
        labels_to_agg_list.remove(class_2_variety)
        labels_to_agg_list.insert(0, new_class)
    return matrix, index
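_get_data_subset is not shown in this listing; a hypothetical helper consistent with how create_matrix calls it (select the samples whose label falls in a given group) might be:

import numpy as np

def _get_data_subset(data, label, label_subset):
    # hypothetical helper: keep only the samples whose label is in label_subset
    mask = np.isin(label, label_subset)
    return data[mask], label[mask]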
Example No. 9
def train_and_test(my_model, trainingData, trainingLabels, noIters, batchSize,
                   alpha, lr):  # can add lambda
    noBatches = int(trainingLabels.shape[0] / batchSize)
    my_criterion = Criterion.Criterion()

    for it in range(noIters):  # renamed from 'iter', which shadows the builtin
        for batch in range(noBatches):
            trData = trainingData[batch * batchSize:(batch + 1) * batchSize, :]
            trLabels = trainingLabels[batch * batchSize:(batch + 1) *
                                      batchSize]

            trOutputs = my_model.forward(trData, True)
            # loss_tr = my_criterion.forward(trOutputs, trLabels).item()

            dl_do = my_criterion.backward(trOutputs, trLabels)
            my_model.backward(trData, dl_do, alpha, lr)
            trOutputs = trOutputs.argmax(1)  # predicted class per sample
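A small self-contained illustration of the batch slicing used above; note that the integer division in noBatches silently drops any trailing samples that do not fill a complete batch:

import numpy as np

data = np.arange(10).reshape(10, 1)      # 10 samples, 1 feature
batchSize = 4
noBatches = data.shape[0] // batchSize   # == 2; the last 2 samples are skipped
for batch in range(noBatches):
    chunk = data[batch * batchSize:(batch + 1) * batchSize, :]
    print(batch, chunk.ravel())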
Example No. 10
save_csv = False

# create models and optimizers
def model_relu():
    return Sequential(Linear(2, 25), Relu(), Linear(25, 25),
                      Relu(), Linear(25, 25), Relu(), Linear(25, 2))

def model_tanh():
    return Sequential(Linear(2, 25), Tanh(), Linear(25, 25),
                      Tanh(), Linear(25, 25), Tanh(), Linear(25, 2))

def opti(model):
    return Optimizer.SGD(model.param(), lr=lr)

def opti_mom(model):
    return Optimizer.SGD(model.param(), lr=lr, momentum=True, mu=mu)

CE = Criterion.CrossEntropy()
MSE = Criterion.MSE()


##############################################################################
#                    test Relu vs Tanh with crossentropy
##############################################################################

test(model_relu, model_tanh, opti, opti, CE, CE, "relu", "tanh",
     repetitions=rep, message="Relu vs Tanh with crossentropy",
     plots=plots, show_plots=show_plots, title_plots="CE",
     save_result_csv=save_csv, filename="../results/csv/CE.csv")

###############################################################################
#               test Relu vs Tanh with crossentropy and momentum
###############################################################################
Example No. 11
import torch
import loadModel
from dataset import Data
import Criterion


classifier = loadModel.load()
# device = 'cpu'
device = 'cuda:0'

trainData = Data(test=False)
criterion = Criterion.Criterion()

batch_size = 32
epochs = 200
alpha = 1e-2
momentum = 0.9

for epoch in range(epochs):
    # simple step learning-rate schedule
    if epoch == 60:
        alpha = 5e-3
    elif epoch == 100:
        alpha = 1e-3
    correct = 0
    count = 0
    for i in range(0, trainData.m, batch_size):
        X, y = trainData.sample(batch_size, i)
        classifier.clearGradParam()
        y_pred = classifier.forward(X)
Example No. 12
def main():

    global char2index
    global index2char
    global SOS_token
    global EOS_token
    global PAD_token

    parser = argparse.ArgumentParser(description='Speech hackathon Baseline')
    parser.add_argument('--hidden_size',
                        type=int,
                        default=512,
                        help='hidden size of model (default: 512)')
    parser.add_argument('--layer_size',
                        type=int,
                        default=3,
                        help='number of layers of model (default: 3)')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.2,
                        help='dropout rate in training (default: 0.2)')
    parser.add_argument('--input_dropout',
                        type=float,
                        default=0.2,
                        help='dropout rate in training (default: 0.2)')
    parser.add_argument(
        '--bidirectional',
        action='store_true',
        help='use bidirectional RNN for encoder (default: False)')
    parser.add_argument(
        '--use_attention',
        action='store_true',
        help='use attention between encoder-decoder (default: False)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help='batch size in training (default: 32)')
    parser.add_argument(
        '--workers',
        type=int,
        default=4,
        help='number of workers in dataset loader (default: 4)')
    parser.add_argument('--max_epochs',
                        type=int,
                        default=10,
                        help='number of max epochs in training (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-04,
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--teacher_forcing',
                        type=float,
                        default=0.5,
                        help='teacher forcing ratio in decoder (default: 0.5)')
    parser.add_argument('--max_len',
                        type=int,
                        default=80,
                        help='maximum characters of sentence (default: 80)')
    parser.add_argument('--no_cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--save_name',
                        type=str,
                        default='model',
                        help='the name of model in nsml or local')
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument("--pause", type=int, default=0)

    args = parser.parse_args()

    char2index, index2char = label_loader.load_label('./hackathon.labels')
    SOS_token = char2index['<s>']
    EOS_token = char2index['</s>']
    PAD_token = char2index['_']

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device('cuda' if args.cuda else 'cpu')

    # N_FFT is defined in loader.py; note the computed spectrogram size is
    # immediately overridden by the fixed mel feature size below.
    feature_size = N_FFT / 2 + 1
    feature_size = 128

    enc = EncoderRNN(feature_size,
                     args.hidden_size,
                     input_dropout_p=args.input_dropout,
                     dropout_p=args.dropout,
                     n_layers=args.layer_size,
                     bidirectional=args.bidirectional,
                     rnn_cell='lstm',
                     variable_lengths=False)

    dec = DecoderRNN(len(char2index),
                     args.max_len,
                     args.hidden_size * (2 if args.bidirectional else 1),
                     SOS_token,
                     EOS_token,
                     n_layers=args.layer_size,
                     rnn_cell='lstm',
                     bidirectional=args.bidirectional,
                     input_dropout_p=args.input_dropout,
                     dropout_p=args.dropout,
                     use_attention=args.use_attention)

    model = Seq2seq(enc, dec)

    model.flatten_parameters()

    for param in model.parameters():
        param.data.uniform_(-0.08, 0.08)

    model = nn.DataParallel(model).to(device)

    optimizer = optim.Adam(model.module.parameters(), lr=args.lr)
    #     criterion = nn.CrossEntropyLoss(reduction='sum', ignore_index=PAD_token).to(device)
    criterion = Criterion.SmoothingLoss(PAD_token, 0.2).to(device)
    infer_melspec = transforms.MelSpectrogram(sample_rate=16000,
                                              n_fft=512,
                                              n_mels=128)
    infer_todb = transforms.AmplitudeToDB(stype="magnitude", top_db=80)

    bind_model(model, infer_melspec, infer_todb, optimizer)

    if args.pause == 1:
        nsml.paused(scope=locals())

    if args.mode != "train":
        return

    data_list = os.path.join(DATASET_PATH, 'train_data', 'data_list.csv')
    wav_paths = list()
    script_paths = list()

    with open(data_list, 'r') as f:
        for line in f:
            # line: "aaa.wav,aaa.label"

            wav_path, script_path = line.strip().split(',')
            wav_paths.append(os.path.join(DATASET_PATH, 'train_data',
                                          wav_path))
            script_paths.append(
                os.path.join(DATASET_PATH, 'train_data', script_path))

    best_loss = 1e10
    begin_epoch = 0

    # load all target scripts for reducing disk i/o
    target_path = os.path.join(DATASET_PATH, 'train_label')
    load_targets(target_path)

    train_batch_num, train_dataset_list, valid_dataset = split_dataset(
        args, wav_paths, script_paths, valid_ratio=0.2)

    logger.info('start')

    train_begin = time.time()

    #     teacher_forcing = args.teacher_forcing
    nsml.load(checkpoint="model99", session="team38/sr-hack-2019-50000/9")

    for epoch in range(begin_epoch, args.max_epochs):

        train_queue = queue.Queue(args.workers * 2)

        train_loader = MultiLoader(train_dataset_list, train_queue,
                                   args.batch_size, args.workers)
        train_loader.start()

        train_loss, train_cer = train(model, train_batch_num, train_queue,
                                      criterion, optimizer, device,
                                      train_begin, args.workers, 10)
        logger.info('Epoch %d (Training) Loss %0.4f CER %0.4f' %
                    (epoch, train_loss, train_cer))

        #         teacher_forcing *= 0.95

        train_loader.join()

        valid_queue = queue.Queue(args.workers * 2)
        valid_loader = BaseDataLoader(valid_dataset, valid_queue,
                                      args.batch_size, 0)
        valid_loader.start()

        eval_loss, eval_cer = evaluate(model, valid_loader, valid_queue,
                                       criterion, device)
        logger.info('Epoch %d (Evaluate) Loss %0.4f CER %0.4f' %
                    (epoch, eval_loss, eval_cer))

        valid_loader.join()

        nsml.report(False,
                    step=epoch,
                    train_epoch__loss=train_loss,
                    train_epoch__cer=train_cer,
                    eval__loss=eval_loss,
                    eval__cer=eval_cer)

        best_model = (eval_loss < best_loss)
        nsml.save("{}{}".format(args.save_name, epoch))

        if best_model:
            nsml.save('best')
            best_loss = eval_loss
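Criterion.SmoothingLoss itself is not shown in this listing. Assuming it is a standard label-smoothing cross entropy that ignores PAD positions (smoothing factor 0.2, matching the call above), a minimal PyTorch sketch might look like this; the class name and the mean-over-non-pad reduction are assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SmoothingLossSketch(nn.Module):
    def __init__(self, pad_id, smoothing=0.2):
        super().__init__()
        self.pad_id = pad_id
        self.smoothing = smoothing

    def forward(self, logits, target):
        # logits: (N, vocab_size), target: (N,) integer class ids
        n_class = logits.size(-1)
        log_probs = F.log_softmax(logits, dim=-1)
        # spread the smoothing mass over the non-target classes
        true_dist = torch.full_like(log_probs, self.smoothing / (n_class - 1))
        true_dist.scatter_(1, target.unsqueeze(1), 1.0 - self.smoothing)
        # mask out padded positions before reducing
        mask = target.ne(self.pad_id).float()
        loss = -(true_dist * log_probs).sum(dim=-1)
        return (loss * mask).sum() / mask.sum().clamp(min=1.0)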
Example No. 13
# create model
model = Sequential(Linear(2, 25), Relu(), Linear(25, 25), Relu(),
                   Linear(25, 25), Relu(), Linear(25, 2))

# create datasets with one-hot encoding for MSE
train_input, train_target = generate_disc_data(one_hot_labels=True)
test_input, test_target = generate_disc_data(one_hot_labels=True)

# normalize the data
mean, std = train_input.mean(), train_input.std()
train_input.sub_(mean).div_(std)
test_input.sub_(mean).div_(std)

# define loss
criterion = Criterion.MSE()

# define optimizer
optim = Optimizer.SGD(parameters=model.param(), lr=1e-1)

# train the model
loss, accuracy = train(model,
                       criterion,
                       optim,
                       train_input,
                       train_target,
                       nb_epochs=200,
                       verbose=True)

# compute statistics on test
output = model.forward(test_input)
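generate_disc_data comes from elsewhere in this project. In the classic form of this exercise the data are uniform points in the unit square labeled by membership in a disc of area 1/2; a hedged sketch, with the sample count and disc geometry assumed:

import math
import torch

def generate_disc_data(n=1000, one_hot_labels=False):
    # points uniform in [0, 1]^2; label 1 inside the disc of radius
    # 1/sqrt(2*pi) centered at (0.5, 0.5), i.e. a disc of area 1/2
    x = torch.rand(n, 2)
    inside = ((x - 0.5).pow(2).sum(1) <= 1.0 / (2.0 * math.pi)).long()
    if one_hot_labels:
        target = torch.zeros(n, 2)
        target[torch.arange(n), inside] = 1.0
        return x, target
    return x, inside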
Example No. 14
def readCommand(argv):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)

    parser.add_option('-c',
                      '--config',
                      help=default('modelConfig'),
                      default='CS 763 Deep Learning HW/modelConfig_1.txt')
    parser.add_option('-i',
                      '--i',
                      help=default('input'),
                      default='CS 763 Deep Learning HW/input_sample_1.bin',
                      type="string")
    parser.add_option(
        '-g',
        '--og',
        help=default('gradoutput'),
        default='CS 763 Deep Learning HW/gradOutput_sample_1.bin',
        type="string")
    parser.add_option('-o', '--o', help=default('output'), type="string")
    parser.add_option('-w', '--ow', help=default('gradweights'), type="string")
    parser.add_option('-b', '--ob', help=default('gradb'), type="string")
    parser.add_option('-d', '--ig', help=default('gradinput'), type="string")

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}

    model_config_path = options.config
    input_path = options.i
    gradoutput_path = options.og
    output_path = options.o
    gradweights_path = options.ow
    gradb_path = options.ob
    gradinput_path = options.ig

    # read the model configuration (layer sizes and paths to weights/biases)
    with open(model_config_path, "r") as modelConfig_file:
        data = modelConfig_file.readlines()

    my_model = Model.Model()
    my_criterion = Criterion.Criterion()

    Number_layer = int(data[0])
    for i in range(Number_layer):
        layer = data[1 + i].split()
        if len(layer) > 1:
            my_model.addLayer(Linear(int(layer[1]), int(layer[2])))
        else:
            my_model.addLayer(ReLu())

    Path_sample_weight = data[Number_layer + 1].strip()
    Path_sample_bias = data[Number_layer + 2].strip()

    input = torchfile.load(input_path)
    input = torch.tensor(input).double().reshape((input.shape[0], -1))

    input_weight = torchfile.load(Path_sample_weight)
    input_bias = torchfile.load(Path_sample_bias)

    input_weight = [torch.tensor(weight).double() for weight in input_weight]
    input_bias = [
        torch.tensor(bias).double().reshape((-1, 1)) for bias in input_bias
    ]

    Outputs = my_model.forward2(input, input_weight, input_bias, True)
    # BUG in the original: trLabels is never defined in this function; the
    # targets would have to be loaded before calling the criterion.
    dl_do = my_criterion.backward(Outputs, trLabels)

    [gradInput, gradWeights, gradBias] = my_model.backward2(input, dl_do, 0, 0)

    torch.save(Outputs, output_path)
    torch.save(gradWeights, gradweights_path)
    torch.save(gradBias, gradb_path)
    torch.save(gradInput, gradinput_path)
Example No. 15

inp = torch.randn(30, 100)
out = (torch.rand(30)*10).floor()
mymodel = Model()
linear = Linear(100, 25)
mymodel.addLayer(linear)
relu = ReLU()
mymodel.addLayer(relu)
linear2 = Linear(25, 10)
mymodel.addLayer(linear2)
lossClass = Criterion()

print(inp)
print(out)
print(mymodel.forward(inp))

learningRate = 1e-1
for i in range(10000):
	yPred = mymodel.forward(inp)
	lossGrad, loss = lossClass.backward(yPred, out)
	if i%200 == 0:
		print(i, loss)
	mymodel.clearGradParam()
	mymodel.backward(inp, lossGrad)
	for layer in mymodel.Layers:
		if layer.isTrainable:
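The listing cuts this loop off. For reference, the usual manual SGD step that such a loop performs on a trainable layer, assuming hypothetical gradW/gradB attributes accumulated by backward(), would be:

		layer.W -= learningRate * layer.gradW
		layer.B -= learningRate * layer.gradB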
Example No. 16
import torch
from Model import *
from readData4 import *
import numpy as np
from Criterion import *
import sys

torch.set_printoptions(precision=3)

torch.set_default_tensor_type('torch.DoubleTensor')
model = Model(-1, 128, 153, 153, 1)

lossClass = Criterion()


def printAcc(start,batch_size):
	print("\nPrinting Accuracy Now~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
	count = 0
	for i in range(batch_size):
		trial_data = data[start+i].view(1,-1)
		yPred = model.forward(trial_data)
		count += (int(yPred.view(1,-1).max(dim=1)[1])==int(labels[start+i]))
		#print(int(yPred.view(1,-1).max(dim=1)[1]),int(labels[i]),yPred.tolist())
	print(count/batch_size)
	print("\nAccuracy block over~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")


def submitPred(path):  # renamed from 'str', which shadowed the builtin
	total_test = 395
	temp_stdout = sys.stdout
	sys.stdout = open(path, "w")
Example No. 17
import torch
from Criterion import *

lossClass = Criterion()
inp = torch.FloatTensor([[-1.8168, 0.3020], [0.6831, 0.8920],
                         [-1.3641, -1.1230]])
target = torch.LongTensor([0, 1, 0])
print(lossClass.forward(inp, target))
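As a sanity check, and assuming this Criterion implements the usual mean-reduced cross entropy over logits, the result can be compared against PyTorch's built-in:

import torch.nn.functional as F

# should roughly match lossClass.forward(inp, target) under that assumption
print(F.cross_entropy(inp, target))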