Example #1
def nnTrain(X_train, y_train, size_list, dropout=False, dropoutProb=0.1, batchNorm=False,
            optimizer='SGD', lr=0.01, n_epochs=100, LSTM=False):

    # Convert the pandas inputs to float tensors (torch.autograd.Variable is deprecated).
    X_train = torch.Tensor(X_train.values.astype(float))
    y_train = torch.Tensor(y_train.values.astype(float))

    if LSTM:
        # Add a leading batch dimension of 1 so the data forms a single sequence.
        X_train = X_train.view(-1, X_train.shape[0], X_train.shape[1])
        model = RNN(size_list)
    else:
        model = MLP(size_list, dropout, dropoutProb, batchNorm)
    
    
    criterion = nn.MSELoss()

    # Select the optimizer by name; the string 'ADAM' maps to optim.Adam.
    if optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=lr)
    elif optimizer == 'RMSprop':
        optimizer = optim.RMSprop(model.parameters(), lr=lr)
    elif optimizer == 'ADAM':
        optimizer = optim.Adam(model.parameters(), lr=lr)
    
    Train_loss = []
    
    # train_epoch (defined elsewhere) runs one pass over the data and returns its loss.
    for i in range(n_epochs):
        train_loss = train_epoch(model, optimizer, X_train, y_train, criterion)
        Train_loss.append(train_loss)
    
    return model, Train_loss
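A minimal usage sketch, assuming MLP, RNN and train_epoch are defined elsewhere in the project and that the inputs are pandas objects; the data shapes and layer sizes below are purely illustrative:

import numpy as np
import pandas as pd

# Hypothetical data: 200 rows, 10 features, one regression target.
X = pd.DataFrame(np.random.rand(200, 10))
y = pd.Series(np.random.rand(200))

# size_list is forwarded to the MLP constructor; these layer sizes are made up.
model, losses = nnTrain(X, y, size_list=[10, 32, 1],
                        optimizer='ADAM', lr=0.001, n_epochs=20)
print('final training loss:', losses[-1])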
Example #2
def main(device):

    model = {'G': Generator(),
             'D': Discriminator()}
    # Note: this dict shadows the torch.optim module for the rest of the function.
    optim = {'G': optim.Adam(model['G'].parameters(), lr=0.001),
             'D': optim.Adam(model['D'].parameters(), lr=0.001)}
    crit  = nn.MSELoss()

    # Move both networks to the target device before training.
    model['G'].to(device)
    model['D'].to(device)

    # epochs, train() and test() are assumed to be defined elsewhere in the module.
    for epoch in range(epochs):
        train()
        test()

    # Move the networks back to the CPU before serializing the checkpoint.
    model['G'].to('cpu')
    model['D'].to('cpu')
    torch.save({'G': model['G'].state_dict(),
                'D': model['D'].state_dict()}, 'GAN_checkpt')
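A minimal way to invoke this entry point, assuming Generator, Discriminator, train, test and epochs are defined at module level as the snippet implies:

import torch

# Prefer the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
main(device)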
Example #3
def run(rank, size, algo, epochs, learning_rate):
    torch.manual_seed(1234)
    train_set, bsz = partition_dataset()
    if torch.cuda.is_available():
        # Move the network to the GPU before wrapping it in DistributedDataParallel.
        model = nn.parallel.DistributedDataParallel(Net().float().cuda())
        print('CUDA available; training on the GPU')
    else:
        model = nn.parallel.DistributedDataParallel(Net().float())
    #     model = load_model(nn.parallel.DistributedDataParallel(Net()), "best_model.pth").float()

    if algo == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=learning_rate,
                              momentum=0.5)
    elif algo == 'adam':
        # Adam has no momentum argument; its moving averages are configured via betas.
        optimizer = optim.Adam(model.parameters(),
                               lr=learning_rate)

    # criterion = nn.cross_entropy()
    num_batches = np.ceil(len(train_set.dataset) / float(bsz))
    best_loss = float("inf")
    for epoch in range(epochs):
        epoch_loss = 0.0
        printProgressBar(0,
                         len(train_set),
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        for i, (data, target) in enumerate(train_set):
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()

            # Clear accumulated gradients before processing each batch.
            optimizer.zero_grad()

            output = model(data)

            loss = F.cross_entropy(output, target)
            epoch_loss += loss.item()
            loss.backward()
            average_gradients(model)
            optimizer.step()
            printProgressBar(i + 1,
                             len(train_set),
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)

        # Report the average epoch loss at roughly ten evenly spaced epochs.
        if epoch % max(1, epochs // 10) == 0:
            print('Rank ', dist.get_rank(), ', epoch ', epoch, ': ',
                  epoch_loss / num_batches)

    if dist.get_rank() == 0 and epoch_loss / num_batches < best_loss:
        best_loss = epoch_loss / num_batches
        path_name = algo + "_" + str(epochs) + "_" + str(learning_rate) + "_model.pth"
        torch.save(model.state_dict(), path_name)
        # torch.save(model.state_dict(), "best_model.pth")

        f = open("256_results.txt", "a")
        info = algo.upper() + "\t" + str(epochs) + "\t" + str(
            num_batches) + "\t" + str(learning_rate) + "\t" + str(
                best_loss) + "\n"
        f.write(info)
        f.close()
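A minimal launcher sketch for this routine, assuming two local workers on the Gloo backend; partition_dataset, Net, average_gradients and printProgressBar come from the original project, and the address, port and hyperparameters below are placeholders:

import torch.distributed as dist
import torch.multiprocessing as mp

def init_process(rank, world_size):
    # Hypothetical launcher: each worker joins the process group, then runs the loop above.
    dist.init_process_group('gloo', init_method='tcp://127.0.0.1:29500',
                            rank=rank, world_size=world_size)
    run(rank, world_size, algo='sgd', epochs=10, learning_rate=0.01)

if __name__ == '__main__':
    world_size = 2
    mp.spawn(init_process, args=(world_size,), nprocs=world_size)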
Example #4
    def train(data):
        ''' TODO: blind data'''

        # Shuffle the data and labels together, then take an 80/20 train/test split.
        combined_data = list(zip(data.data, data.labels))
        random.shuffle(combined_data)
        all_data, all_labels = zip(*combined_data)
        split = int(len(all_data) * 0.8)
        train_data, train_labels = all_data[:split], all_labels[:split]
        test_data, test_labels = all_data[split:], all_labels[split:]

        time_constraint = data.time_constraint

        # Larger time budgets get a wider hidden layer, Adam instead of plain SGD,
        # and more training epochs.
        if time_constraint == 1:
            model = MLP.Perceptron(len(data.data[0]), len(data.data[0]),
                                   len(set(data.labels)))
            loss_function = nn.CrossEntropyLoss()
            optimizer = optim.SGD(model.parameters(), lr=1)
            n_epochs = 5

        elif time_constraint == 2:
            model = MLP.Perceptron(len(data.data[0]), 100,
                                   len(set(data.labels)))
            loss_function = nn.CrossEntropyLoss()
            optimizer = optim.SGD(model.parameters(), lr=0.01)
            n_epochs = 5

        elif time_constraint == 3:
            model = MLP.Perceptron(len(data.data[0]), 100,
                                   len(set(data.labels)))
            loss_function = nn.CrossEntropyLoss()
            optimizer = optim.Adam(model.parameters())
            n_epochs = 10

        elif time_constraint == 4:
            model = MLP.Perceptron(len(data.data[0]), 100,
                                   len(set(data.labels)))
            loss_function = nn.CrossEntropyLoss()
            optimizer = optim.Adam(model.parameters())
            n_epochs = 20

        elif time_constraint == 5:
            model = MLP.Perceptron(len(data.data[0]),
                                   100,
                                   len(set(data.labels)),
                                   extratime=True)
            loss_function = nn.CrossEntropyLoss()
            optimizer = optim.Adam(model.parameters())
            n_epochs = 50

        batches = batchify(train_data, train_labels)
        test_batches = batchify(test_data, test_labels, size=1)

        for epoch in range(n_epochs):
            print(epoch)
            model.train()

            # Train on at most 1000 batches per epoch.
            for n, batch in enumerate(batches[0:1000]):

                cells = batch[0]
                lbls = batch[1]

                # Each batch is assumed to hold 16 cells with 38 features each.
                pred = model(torch.Tensor(cells).view(16, 38))
                model.zero_grad()
                _, predicted = torch.max(pred.data, 1)

                # CrossEntropyLoss expects integer class indices as targets.
                loss = loss_function(pred, torch.LongTensor(lbls))
                loss.backward()
                optimizer.step()

        # Validation: evaluate the trained model on the held-out 20% split.
        model.eval()
        with torch.no_grad():
            correct = 0
            for batch in test_batches:
                for cell, label in batch:
                    outputs = model(torch.from_numpy(np.asarray([cell])).float())
                    _, predicted = torch.max(outputs.data, 1)
                    if predicted == label:
                        correct += 1

        print('validation accuracy:', correct / max(1, len(test_data)))
        return -1
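A toy invocation sketch, assuming train is reachable as a plain function and that batchify and MLP.Perceptron from the original project yield batches of 16 cells with 38 features each (matching the hard-coded view(16, 38) above); the dataset below is synthetic:

from types import SimpleNamespace
import numpy as np

# Hypothetical dataset: 160 cells with 38 features each and 4 classes,
# trained under the smallest time budget (time_constraint == 1).
toy = SimpleNamespace(data=np.random.rand(160, 38).tolist(),
                      labels=np.random.randint(0, 4, size=160).tolist(),
                      time_constraint=1)
train(toy)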