Example #1
def test(args, model, test_path, criterion):
    current_loss = 0
    
    lossPerDays = []
    
    lossPerDays_avg = []

    model.eval()

    daylen = args.daytolook
    with torch.no_grad():
        iloop = 0
        test_iter = FSIterator(test_path, args.batch_size, 1)
        for input, target, mask in test_iter:

            lossPerDay, loss = evaluate(args, model, input, mask, target, criterion)
            lossPerDays.append(lossPerDay[:daylen])  # stacked below into (n_batches, daylen)
            current_loss += loss
            iloop += 1

        # average the per-day losses and the running loss over all batches
        lossPerDays = torch.stack(lossPerDays)
        lossPerDays_avg = lossPerDays.sum(dim=0) / iloop
        current_loss = current_loss / iloop

    return lossPerDays_avg, current_loss
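
The evaluate helper called above is not part of this excerpt. A minimal sketch that matches the Example #1 call site, assuming the model returns (output, hidden) with per-timestep log-probabilities and the criterion is element-wise (e.g. nn.NLLLoss(reduction='none')); the shapes and masking convention are assumptions, not taken from the source:

def evaluate(args, model, input, mask, target, criterion):
    # Hypothetical sketch: forward the whole sequence, mask out padded days,
    # and reduce to (per-day loss vector, scalar loss).
    output, _ = model(input, None)              # assumed (seq_len, batch, n_classes)
    seq_len, batch_size = mask.shape

    loss_flat = criterion(output.view(-1, output.size(-1)), target.view(-1))
    loss_per_step = loss_flat.view(seq_len, batch_size) * mask

    lossPerDay = loss_per_step.sum(dim=1) / mask.sum(dim=1).clamp(min=1)
    loss = loss_per_step.sum() / mask.sum().clamp(min=1)
    return lossPerDay, loss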
Example #2
def test(args, model, test_path, criterion):
    current_loss = 0
    current_acc = 0
    current_recall = 0

    lossPerDays = []
    accPerDays = []
    recallPerDays = []
    precisionPerDays = []
    f1PerDays = []

    lossPerDays_avg = []
    accPerDays_avg = []
    recallPerDays_avg = []
    precisionPerDays_avg = []
    f1PerDays_avg = []

    model = model.eval()

    daylen = args.daytolook
    with torch.no_grad():
        iloop = 0
        test_iter = FSIterator(test_path, args.batch_size, 1)
        for input, target, mask in test_iter:

            f1PerDay, precisionPerDay, recallPerDay, accPerDay, acc, lossPerDay, loss = evaluate(
                args, model, input, mask, target, criterion)
            lossPerDays.append(lossPerDay[:daylen])  # stacked below into (n_batches, daylen)
            accPerDays.append(accPerDay[:daylen])
            recallPerDays.append(recallPerDay)
            precisionPerDays.append(precisionPerDay)
            f1PerDays.append(f1PerDay)

            current_acc += acc
            current_loss += loss
            iloop += 1

        lossPerDays = torch.stack(lossPerDays)
        lossPerDays_avg = (lossPerDays.sum(dim=0)) / iloop

        accPerDays = torch.stack(accPerDays)
        accPerDays_avg = (accPerDays.sum(dim=0)) / iloop

        recallPerDays_avg = torch.FloatTensor(recallPerDays).sum(dim=0) / iloop
        precisionPerDays_avg = torch.FloatTensor(precisionPerDays).sum(dim=0) / iloop
        f1PerDays_avg = torch.FloatTensor(f1PerDays).sum(dim=0) / iloop

        current_acc = current_acc / iloop
        current_loss = current_loss / iloop

    return (precisionPerDays_avg, f1PerDays_avg, recallPerDays_avg,
            accPerDays_avg, current_acc, lossPerDays_avg, current_loss)
Example #3
def train_main(args, model, train_path, criterion, optimizer):
    iloop = 0
    current_loss = 0
    all_losses = []
    batch_size = args.batch_size
    train_iter = FSIterator(train_path, batch_size)
    for input, target, mask in train_iter:  #TODO for debugging
        loss = train(args, model, input, mask, target, optimizer, criterion)
        current_loss += loss

        if (iloop + 1) % args.logInterval == 0:
            print("%d %.4f" % (iloop + 1, current_loss / args.logInterval))
            all_losses.append(current_loss / args.logInterval)
            current_loss = 0

        iloop += 1

    # return the logged losses so the caller can inspect or plot them
    return all_losses
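
The per-batch train step used here is also defined elsewhere. A minimal sketch matching the call train(args, model, input, mask, target, optimizer, criterion), under the same masked, element-wise loss assumption as the evaluate sketch above:

def train(args, model, input, mask, target, optimizer, criterion):
    # Hypothetical sketch of one optimization step: forward pass, masked loss,
    # backward pass, parameter update; returns a Python float for logging.
    model.train()
    optimizer.zero_grad()

    output, _ = model(input, None)
    loss_flat = criterion(output.view(-1, output.size(-1)), target.view(-1))
    loss = (loss_flat.view_as(mask) * mask).sum() / mask.sum().clamp(min=1)

    loss.backward()
    optimizer.step()
    return loss.item()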
Example #4
def test_main(args):
    batch_size = 1  # this is fixed to 1 at testing

    device = torch.device("cpu")
    model = torch.load(args.loadPath).to(device)

    testiter = FSIterator(args.test_file,
                          batch_size=batch_size,
                          just_epoch=True)

    n_totals = np.zeros(args.horizon)
    n_targets = np.zeros((2, args.horizon))
    n_corrects = np.zeros((2, args.horizon))

    for i, (x, y, xm, end_of_file) in enumerate(testiter):
        x = torch.tensor(x, dtype=torch.float32)
        y = torch.tensor(y, dtype=torch.int32)
        output, hidden = model(x, None)
        logit, pred = output.topk(1)

        for t in range(args.horizon):
            if t >= x.shape[0]: break

            n_totals[t] += 1
            n_targets[y[t].item(), t] += 1

            if pred[t].item() == y[t].item():
                n_corrects[pred[t].item(), t] += 1

    accs = []
    for i in range(args.horizon):
        if n_totals[i] != 0:
            accs.append(str(np.sum(n_corrects[:, i]) / n_totals[i]))

    accString = ','.join(accs)

    with open(args.result_out_file, "a") as fp:
        fp.write(accString + "\n")
    print(accString)
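
A hypothetical way to invoke test_main; the argument names follow the attributes accessed above, while the default values are purely illustrative:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--loadPath", default="./model.pt")
    parser.add_argument("--test_file", default="./data/classification.test")
    parser.add_argument("--horizon", type=int, default=10)
    parser.add_argument("--result_out_file", default="./results.csv")
    args = parser.parse_args()

    test_main(args)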
Example #5
def train_main(args):
    trainiter = FSIterator(args.tr_file, batch_size=args.batch_size)
    validiter = FSIterator(args.val_file,
                           batch_size=args.batch_size,
                           just_epoch=True)

    device = torch.device("cuda")

    # setup model
    from model import FS_MODEL1, FS_MODEL2
    model = FS_MODEL2(args.input_size, args.dim_hidden, args.output_size,
                      args.batch_size, args.n_layers).to(device)

    # define the optimizer and loss
    optimizer = getattr(optim, args.optimizer)(model.parameters(), args.lr)
    criterion = nn.NLLLoss(reduction='none')

    start = time.time()
    tr_losses = []
    val_losses = []
    current_loss = 0
    valid_loss = 0.0
    bad_counter = 0
    best_loss = -1

    for i, (tr_x, tr_y, xm, end_of_file) in enumerate(trainiter):
        tr_x = torch.FloatTensor(tr_x).to(device)
        tr_y = torch.LongTensor(tr_y).to(device)
        xm = torch.FloatTensor(xm).to(device)

        output, loss = train(model, tr_x, xm, tr_y, optimizer, criterion)
        current_loss += loss

        # log the iteration number, elapsed time, and running average loss
        if (i + 1) % args.print_every == 0:

            top_n, top_i = output.topk(1)
            print("%d (%s) %.4f" %
                  (i + 1, timeSince(start), current_loss / args.print_every))
            tr_losses.append(current_loss / args.print_every)

            current_loss = 0

        if (i + 1) % args.valid_every == 0:
            valid_loss = validate(model, validiter, device, criterion)
            print("val : {:.4f}".format(valid_loss))

            if valid_loss < best_loss or best_loss < 0:
                bad_counter = 0
                torch.save(model, args.model_out_file)
                val_losses.append(valid_loss)
                best_loss = valid_loss

            else:
                bad_counter += 1

            if bad_counter > args.patience:
                print('Early Stopping')
                break

    return tr_losses, val_losses
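
validate is referenced but not defined in this excerpt. A minimal sketch, assuming it averages the same masked loss over one pass of the validation iterator (which yields the same 4-tuples as the training loop above):

def validate(model, validiter, device, criterion):
    # Hypothetical sketch: average the masked NLL over one validation epoch.
    model.eval()
    total_loss, n_batches = 0.0, 0
    with torch.no_grad():
        for val_x, val_y, xm, end_of_file in validiter:
            val_x = torch.FloatTensor(val_x).to(device)
            val_y = torch.LongTensor(val_y).to(device)
            xm = torch.FloatTensor(xm).to(device)

            output, _ = model(val_x, None)
            loss_flat = criterion(output.view(-1, output.size(-1)), val_y.view(-1))
            total_loss += ((loss_flat.view_as(xm) * xm).sum() / xm.sum().clamp(min=1)).item()
            n_batches += 1
    model.train()
    return total_loss / max(n_batches, 1)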
Example #6
            bad_counter = 0
            torch.save(model, args.saveModel)
        else:
            bad_counter += 1

        if bad_counter > patience:
            print('Early Stopping')
            break
    # Draw a sample

    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    save_path = os.path.join("./", args.saveDir)

    train_iter = FSIterator(train_path, batch_size)

    for input, target, mask in train_iter:
        output = model(input)
        input = input[:, :, 0].transpose(1, 0)
        output = output.squeeze().transpose(1, 0)
        mask = mask.transpose(1, 0)
        for i in range(batch_size):
            daylen = np.count_nonzero(mask[i].cpu()) - 1
            plt.plot(input[i, 1:daylen + 1].cpu())
            plt.plot(output[i, :daylen].detach().cpu())
            plt.savefig(save_path + "/" + args.fileName + str(i) + ".png")
            plt.clf()
        break
Example #7
    return current_loss / (tr_x.size(0) * i)


def timeSince(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


if __name__ == "__main__":
    batch_size = args.batch_size  # TODO: batch_size and seq_len are issues still to be addressed
    n_epoches = args.max_epochs

    trainiter = FSIterator("./data/classification.tr", batch_size=batch_size)
    validiter = FSIterator(
        "./data/classification.val", batch_size=batch_size, just_epoch=True
    )  # batch_size=1 is recommended, since the remainder is discarded

    device = torch.device("cuda")

    #TODO variables need to be args
    # setup model
    from model import FS_MODEL1, FS_MODEL2
    input_size = 2
    hidden_size = args.hidden_size
    output_size = 2

    model = FS_MODEL2(input_size, hidden_size, output_size,
                      batch_size).to(device)