Example #1
import threading
import time

import helpers  # project-specific module assumed to provide threadCount and timeSince


def downloadDataAsync(method, args):
    """Run method(args) on helpers.threadCount worker threads and wait for them all."""
    activeThreads = []
    downloadingStarted = time.time()

    # Spawn the worker threads.
    for _ in range(helpers.threadCount):
        t = threading.Thread(target=method, args=(args,))
        t.start()
        activeThreads.append(t)

    # Block until every worker has finished.
    for t in activeThreads:
        t.join()

    print("Downloading is completed in " + helpers.timeSince(downloadingStarted) + "!")
Example #2
def downloadDataAsync(method, args):
    activeThreads = []
    downloadingStarted = time.time()

    for threads in range(helpers.threadCount):
        t = threading.Thread(target=method, args=(args, ))
        t.start()
        activeThreads.append(t)

    for t in activeThreads:
        t.join()

    print("Downloading is completed in " +
          helpers.timeSince(downloadingStarted) + "!")
Example #3
data = h5py.File(args.data, 'r')  # open the HDF5 data file read-only

start = time.time()
# train = torch.utils.data.TensorDataset(torch.ByteTensor(data['train_in'][:].astype('uint8')), 
#                                        torch.ByteTensor(data['train_out'][:].astype('uint8')))
val = torch.utils.data.TensorDataset(torch.ByteTensor(data['valid_in'][:].astype('uint8')), 
                                     torch.ByteTensor(data['valid_out'][:].astype('uint8')))
test = torch.utils.data.TensorDataset(torch.ByteTensor(data['test_in'][:].astype('uint8')), 
                                      torch.ByteTensor(data['test_out'][:].astype('uint8')))
# train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True)
# train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True, num_workers=int(args.workers))
val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False)
# val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
test_loader = torch.utils.data.DataLoader(test, batch_size=args.batch_size, shuffle=False)
# test_loader = torch.utils.data.DataLoader(test, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
print("Dataloaders generated {}".format( timeSince(start) ),file=Logger)

#criterion = torch.nn.MultiLabelSoftMarginLoss() # Loss function
criterion = torch.nn.BCEWithLogitsLoss(size_average=False)

# model_files = sorted(glob.glob('bassetnorm_*.pkl'))
# for mf in model_files:

model.eval()
losses  = []
y_score = []
y_test  = []
#val_loader.init_epoch()
for inputs, targets in test_loader:
    inputs = to_one_hot(inputs, n_dims=4).permute(0,3,1,2).squeeze().float()
    targets = targets.float()
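The snippet breaks off inside the test loop. A plausible continuation, written in the same pre-0.4 PyTorch style as the surrounding examples and assuming the usual roles of the losses, y_score and y_test lists initialised above (hypothetical, not part of the original):

    # Hypothetical continuation of the test loop:
    outputs = model(inputs)             # forward pass on the one-hot encoded batch
    loss = criterion(outputs, targets)  # summed BCE-with-logits loss for the batch
    losses.append(loss.data[0])
    y_score.append(torch.sigmoid(outputs).data.cpu().numpy())
    y_test.append(targets.cpu().numpy())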
Example #4
print("Reading data from file {}".format(args.data),file=Logger)
data = h5py.File(args.data, 'r')  # open the HDF5 data file read-only

train = torch.utils.data.TensorDataset(torch.ByteTensor(data['train_in'][:].astype('uint8')), 
                                       torch.ByteTensor(data['train_out'][:].astype('uint8')))
val = torch.utils.data.TensorDataset(torch.ByteTensor(data['valid_in'][:].astype('uint8')), 
                                     torch.ByteTensor(data['valid_out'][:].astype('uint8')))
test = torch.utils.data.TensorDataset(torch.ByteTensor(data['test_in'][:].astype('uint8')), 
                                      torch.ByteTensor(data['test_out'][:].astype('uint8')))
train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True)
# train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True, num_workers=int(args.workers))
val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False)
# val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
test_loader = torch.utils.data.DataLoader(test, batch_size=args.batch_size, shuffle=False)
# test_loader = torch.utils.data.DataLoader(test, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
print("Dataloaders generated {}".format( timeSince(start) ),file=Logger)

params = list(filter(lambda x: x.requires_grad, model.parameters()))
if args.optimizer_type == 0:
    optimizer = torch.optim.Adadelta(params, lr=args.learning_rate, rho=args.rho, weight_decay=args.weight_decay)
elif args.optimizer_type == 1:
    optimizer = torch.optim.Adam(params, lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optimizer_type == 2:
    optimizer = torch.optim.RMSprop(params, lr=args.learning_rate, alpha=args.alpha, weight_decay=args.weight_decay)

#criterion = torch.nn.MultiLabelSoftMarginLoss() # Loss function
criterion = torch.nn.BCEWithLogitsLoss(size_average=False)

start = time.time()
best_loss = np.inf
print("Begin training",file=Logger)
Example #5
    val_in, val_out = reshape(genxy(sl * 100, 0.25))

    model.train()
    model.set_hidden(Variable(torch.zeros(1, 1, args.hidden_size)))
    ctr = 0
    for i in range(0, len(train_out) // sl):
        inp = Variable(torch.Tensor(train_in[i * sl:(i + 1) * sl, :]))
        trg = Variable(torch.LongTensor(train_out[i * sl:(i + 1) * sl]))
        optimizer.zero_grad()
        outputs = model(inp)
        loss = criterion(outputs, trg)
        loss.backward()
        optimizer.step()
        ctr += 1
        if ctr % (len(train_out) // sl - 1) == 0:
            timenow = timeSince(start)
            print('Epoch [%d/%d], Iter [%d/%d], Time: %s, Loss: %.4f' %
                  (epoch + 1, args.num_epochs, ctr, len(train_out) // sl,
                   timenow, loss.data[0]))
    #
    model.eval()
    model.set_hidden(Variable(torch.zeros(1, 1, args.hidden_size)))
    losses = []
    accs = []
    for i in range(0, len(val_out) // sl):
        inp = Variable(torch.Tensor(val_in[i * sl:(i + 1) * sl, :]))
        trg = Variable(torch.LongTensor(val_out[i * sl:(i + 1) * sl]))
        outputs = model(inp)
        loss = criterion(outputs, trg)
        losses.append(loss.data[0])
        _, preds = torch.max(outputs, 1)
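The validation loop is cut off right after the argmax; the accs list initialised above is presumably filled from those predictions, along these lines (hypothetical continuation, same old-style API as the rest of the snippet):

        accs.append((preds == trg).float().mean().data[0])  # per-chunk accuracy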
Example #6
                model.eval()
                if (not args.freeze_models) and args.interpolated_model:
                    for member in model.members:
                        member.eval()
                x_de = Variable(x_de.data, volatile=True)  # volatile: pre-0.4 way to disable gradient tracking for inference
                y_pred,_ = model.predict(x_de, x_en) # bs,n_en
                correct = (y_pred == x_en) # these are the same shape and both contain a sos_token row
                no_pad = (x_en != pad_token) & (x_en != sos_token)
                print_acc_total += (correct & no_pad).data.sum() / no_pad.data.sum()

            if ctr % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print_acc_avg = print_acc_total / print_every
                print_acc_total = 0
                timenow = timeSince(start)
                print('Time %s, Epoch [%d/%d], Iter [%d/%d], Loss: %.4f, Reward: %.2f, Accuracy: %.2f, PPL: %.2f'
                      % (timenow, epoch+1, n_epochs, ctr, len(train_iter), print_loss_avg,
                         model.baseline.data[0], print_acc_avg, np.exp(print_loss_avg)))

            if ctr % plot_every == 0:
                plot_loss_avg = plot_loss_total / plot_every
                plot_loss_total = 0
                plot_losses.append(plot_loss_avg)

        val_loss_total = 0 # Validation/early stopping
        model.eval()
        if (not args.freeze_models) and args.interpolated_model:
            for member in model.members:
                member.eval()
        for batch in iter(val_iter):
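The section ends at the top of the validation loop. Worth noting for anyone porting this code: Variable(x_de.data, volatile=True) above is the pre-0.4 PyTorch idiom for an inference-only pass; on current releases the same thing is expressed with a no-grad context, roughly (a sketch, not the original code):

with torch.no_grad():
    y_pred, _ = model.predict(x_de, x_en)   # model.predict as used above
    correct = (y_pred == x_en)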