Example no. 1
def main():
    # TODO: shift to a config file
    parser = argparse.ArgumentParser()
    parser.add_argument("--filepath", help="directory containing data")
    parser.add_argument("--filename", help="file containing data")
    parser.add_argument("--remove_first_column",
                        help="Remove first column of data")
    parser.add_argument("--epochs", help="total number of epochs")
    parser.add_argument("--batch_size", help="size of a batch")
    parser.add_argument("--learning_rate", help="learning rate if using SGD")
    args = parser.parse_args()

    train_x, train_y, test_x, test_y = get_data(args.filepath, args.filename)
    # a Dataset just provides __getitem__ (see the minimal sketch after this function)
    train_ds = Dataset(train_x, train_y)
    test_ds = Dataset(test_x, test_y)

    train_dl = DataLoader(dataset=train_ds, batch_size=int(args.batch_size))
    # a DataLoader does two things: shuffles the data and converts values to tensors
    test_dl = DataLoader(dataset=test_ds, batch_size=2 * int(args.batch_size))

    no_of_classes = 1 + int(test_y.max().item())
    optim = SGDOptimizerForModules(float(args.learning_rate))
    model = LogisticClassification([len(train_x[0]), 100],
                                   [100, no_of_classes], optim)
    epochs = int(args.epochs)
    callbacks = [
        LossAndAccuracyCallback(),
        LrScheduler(cosine_schedule(float(args.learning_rate), 0.001)),
        LrRecorder()
    ]
    runner = Runner(model, train_dl, test_dl, callbacks)
    runner.fit(epochs)
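
# The comments above describe the project's custom Dataset/DataLoader; the
# following is a minimal sketch of such classes, an assumption rather than the
# author's actual implementation (SketchDataset/SketchDataLoader are
# hypothetical names, and train_x/train_y are assumed to be NumPy arrays).
import numpy as np
import torch


class SketchDataset:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        # the dataset only hands back one (features, label) pair
        return self.x[idx], self.y[idx]


class SketchDataLoader:
    def __init__(self, dataset, batch_size):
        self.dataset, self.batch_size = dataset, batch_size

    def __iter__(self):
        # shuffle once per epoch, then yield tensor batches
        order = np.random.permutation(len(self.dataset))
        for start in range(0, len(order), self.batch_size):
            rows = [self.dataset[i] for i in order[start:start + self.batch_size]]
            xs, ys = zip(*rows)
            yield torch.tensor(np.stack(xs)).float(), torch.tensor(np.stack(ys)).long()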
Example no. 2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--filepath", help="directory containing the file")
    parser.add_argument("--filename", help="file containing data")
    parser.add_argument("--epochs", type=int, help="total number of epochs")
    parser.add_argument("--batch_size", type=int, help="size of a batch")
    parser.add_argument("--learning_rate",
                        type=float,
                        help="learning rate if using SGD")
    args = parser.parse_args()

    train_x, train_y, test_x, test_y = get_data(args.filepath, args.filename)
    train_ds = Dataset(train_x, train_y)
    test_ds = Dataset(test_x, test_y)

    train_dl = DataLoader(dataset=train_ds, batch_size=args.batch_size)
    test_dl = DataLoader(dataset=test_ds, batch_size=2 * args.batch_size)

    no_of_classes = 1 + int(train_y.max().item())
    model = LogisticClassification([len(train_x[0]), 100], [100, 50],
                                   [50, no_of_classes])
    optim = Optimizer(model.trainable_params, args.learning_rate)
    callbacks = [
        LossAndAccuracyCallback(),
        LrScheduler(cosine_schedule(args.learning_rate, 0.01))
    ]
    runner = RunnerPytorch(model, optim, train_dl, test_dl, callbacks)

    runner.fit(args.epochs)
    # This is the problem with individual param hooks: they have to be plotted separately.
    # It's better to collect gradients for entire layers (see the sketch after this function).

    #TODO: add better plots

    # Mean of gradients of W
    plt.plot(model.layer1.grads_mean[0], label="Layer_1_W")
    plt.plot(model.layer2.grads_mean[0], label="Layer_2_W")
    plt.plot(model.layer3.grads_mean[0], label="Layer_3_W")
    plt.legend(loc="upper right")
    plt.title("Means of Weights")
    plt.show()
    plt.close()

    # Variances of gradients of W
    plt.plot(model.layer1.grads_variance[0], label="Layer_1_W")
    plt.plot(model.layer2.grads_variance[0], label="Layer_2_W")
    plt.plot(model.layer3.grads_variance[0], label="Layer_3_W")
    plt.legend(loc="upper right")
    plt.title("Variances of Weights")
    plt.show()
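
# A hedged sketch (not the author's code) of the alternative the comments above
# point at: collect gradient statistics per layer with a single module backward
# hook instead of one hook per parameter. Note that this records the gradient
# flowing out of the layer (grad_output), not the per-parameter gradients
# plotted above.
import torch.nn as nn


def track_grad_stats(layer):
    layer.grads_mean, layer.grads_variance = [], []

    def hook(module, grad_input, grad_output):
        g = grad_output[0].detach()
        module.grads_mean.append(g.mean().item())
        module.grads_variance.append(g.var().item())

    layer.register_full_backward_hook(hook)
    return layer


# usage: wrap each layer once, train as usual, then plot layer.grads_mean directly
layer1 = track_grad_stats(nn.Linear(100, 50))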
Example no. 3
def main():
    # TODO: shift to a config file
    parser = argparse.ArgumentParser()
    parser.add_argument("--filepath", help="directory containing data")
    parser.add_argument("--filename", help="file containing data")
    parser.add_argument("--remove_first_column",
                        help="Remove first column of data")
    parser.add_argument("--epochs", help="total number of epochs")
    parser.add_argument("--batch_size", help="size of a batch")
    parser.add_argument(
        "--update_rule",
        help="Matrix for the closed-form (normal equations) update "
             "or SGD for stochastic gradient descent")
    parser.add_argument("--learning_rate", help="learning rate if using SGD")
    parser.add_argument(
        "--loss_type",
        help="MSE for mean squared error or ML for maximum likelihood")
    parser.add_argument("--regularization", help="L1 or L2 regularization")
    parser.add_argument("--regularization_constant",
                        help="regularization constant")
    args = parser.parse_args()

    train_x, train_y, test_x, test_y = get_data(args.filepath, args.filename)
    train_ds = Dataset(train_x, train_y)  # a Dataset just provides __getitem__
    test_ds = Dataset(test_x, test_y)

    train_dl = DataLoader(dataset=train_ds, batch_size=int(args.batch_size))
    # a DataLoader does two things: shuffles the data and converts values to tensors
    test_dl = DataLoader(dataset=test_ds, batch_size=2 * int(args.batch_size))
    torch.autograd.set_detect_anomaly(True)
    no_of_classes = 1 + int(test_y.max().item())
    # optim = Optimizer(model.trainable_params,float(args.learning_rate))
    optim = SGDOptimizerForModules(float(args.learning_rate))
    model = LogisticClassification([len(train_x[0]), 100], [100, 50],
                                   [50, no_of_classes], optim)
    epochs = int(args.epochs)
    batch_size = int(args.batch_size)
    for i in range(epochs):
        for j, (x, y) in enumerate(train_dl):
            print(" Train error for epoch " + str(i) + " and batch " +
                  str(j + 1) + " : ")
            model.forward(x, y)
            model.backward()

        for j, (x, y) in enumerate(test_dl):
            print(" Test error for epoch " + str(i) + " and batch " +
                  str(j + 1) + " : ")
            with torch.no_grad():
                model.forward(x, y)
Example no. 4
def loadData():
    # Create the dataset
    dataset = Dataset(csv_file='csv.csv',
                      transform=transform.Compose([
                          transform.Resize((100, 100)),
                          transform.ToTensor(),
                          transform.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                      ]))

    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split1 = int(np.floor(0.6 * dataset_size))
    split2 = int(np.floor(0.8 * dataset_size))
    # 60/20/20 split of the indices into train/validation/test
    train_indices = indices[:split1]
    val_indices = indices[split1:split2]
    test_indices = indices[split2:]

    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    test_sampler = SubsetRandomSampler(test_indices)

    trainloader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              sampler=train_sampler)
    valloader = torch.utils.data.DataLoader(dataset,
                                            batch_size=batch_size,
                                            sampler=valid_sampler)
    testloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             sampler=test_sampler)

    return trainloader, valloader, testloader
Example no. 5
def train():
    train_generator, train_steps_per_epoch, val_generator, val_steps_per_epoch = Dataset.create_generators(
        '/home/xzhang/kerasLab/3D_MRI_Classification/Data',
        24,
        validation_split=0.2,
        shuffle_train_val=True,
        shuffle=True,
        seed=0)

    images, labels = next(train_generator)
    _, height, width, length, channels = images.shape
    # print(images.shape, labels)
    classes_num = 2  #len(set(labels.flatten()))

    model = Models.dilated_densenet(height=height,
                                    width=width,
                                    length=length,
                                    channels=channels,
                                    classes=classes_num,
                                    features=32,
                                    depth=2,
                                    padding='same',
                                    temperature=1.0,
                                    batchnorm=False,
                                    dropout=0.0)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    callbacks = []  # left empty here; see the sketch after this function for typical entries
    # x_train, y_train = Dataset.load_images('/home/xzhang/kerasLab/3D_MRI_Classification/Data')
    # y_train = np_utils.to_categorical(y_train, 2)
    model.fit_generator(train_generator,
                        epochs=50,
                        steps_per_epoch=train_steps_per_epoch,
                        validation_data=val_generator,
                        validation_steps=val_steps_per_epoch,
                        callbacks=callbacks,
                        verbose=2)

    # y_train = np_utils.to_categorical(y_train, 2)
    # print(y_train)
    # model.fit(x_train, y_train, batch_size=32, epochs=20, verbose=2)

    return 0
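
# callbacks is left empty above; a hedged example (the checkpoint file name and
# patience are placeholder assumptions, not this project's configuration) of
# callbacks that model.fit_generator would accept:
from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # keep only the weights with the best validation loss
    ModelCheckpoint('dilated_densenet_best.h5', monitor='val_loss', save_best_only=True),
    # stop early if validation loss stops improving
    EarlyStopping(monitor='val_loss', patience=5),
]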
Example no. 6
    #### Save ####
    torch.save(net.state_dict(), PATH_TO_MODEL)

    print_time("Training time :", debug_level, "TRAIN", time_start, 3)

    return


if __name__ == "__main__":
    #### Debug settings ####
    PRINT_LEVEL = "TRAIN"  # Possible modes : DEBUG, INFO, RUN, TRAIN
    print_info("Starting training with debug level : " + PRINT_LEVEL,
               PRINT_LEVEL, "TRAIN")

    #### Pytorch settings ####
    torch.set_default_tensor_type(torch.FloatTensor)
    print_info("Working device : " + str(DEVICE), PRINT_LEVEL, "INFO")

    #### Net ####
    Net = DDSPNet().float()
    Net = Net.to(DEVICE)

    #### Data ####
    dataset = Dataset()
    dataloader = DataLoader(dataset,
                            batch_size=BATCH_SIZE,
                            shuffle=SHUFFLE_DATALOADER)

    #### Train ####
    train(Net, dataloader, NUMBER_EPOCHS, PRINT_LEVEL)
Example no. 7
        train_states = torch.load(FILEPATH_MODEL_LOAD)
        model_ft.load_state_dict(train_states['train_states_latest']['model_state_dict'])
        optimizer_ft.load_state_dict(train_states['train_states_latest']['optimizer_state_dict'])
        train_states_best = train_states['train_states_best']
        loss_valid_min = train_states_best['loss_valid_min']
        model_save_criteria = train_states_best['model_save_criteria']
    else:
        train_states = {}
        model_save_criteria = np.inf

    nEpochs = 100
    batchSize = 10

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_dataset = Dataset(train_dir, trainImages, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchSize, shuffle=True)

    valid_dataset = Dataset(valid_dir, validImages, transform=transform)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batchSize, shuffle=False)

    loss_train = []
    loss_valid = []

    for epoch in range(nEpochs):
        running_loss = 0
        # epoch_accuracy = 0
        running_time_batch = 0
        time_batch_start = time.time()
        model_ft.train()
        print("training...")
Example no. 8
import argparse
from datetime import datetime
from os.path import join

import torch
from torch.utils import data

from network import UnetPP
from loss import LosswithLogit
# Dataset and set_logging_path come from project modules not shown in this excerpt

parser = argparse.ArgumentParser()

parser.add_argument('--batch_size', '-bs', type=int, default=1)
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('-lr', '--learning_rate', type=float, default=1e-4)

args = parser.parse_args()

set_logging_path(
    path=join(args.log_dir, 'log{}.txt'.format(datetime.now().strftime(
        "%Y-%m-%d-%H-%M-%S"))))

trainloader = data.DataLoader(dataset=Dataset(root="./data/stage1_train_neat"),
                              batch_size=args.batch_size,
                              shuffle=True)
testloader = data.DataLoader(dataset=Dataset(root="./data/stage1_test_neat"),
                             batch_size=args.batch_size,
                             shuffle=False)

device = torch.device("cuda:0")
model = UnetPP()
model.to(device)

criterion = LosswithLogit()
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

#lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=, gamma=)
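
# The commented-out scheduler above has its milestones and gamma elided; a
# hedged example of how MultiStepLR is typically wired up (the milestone epochs
# and gamma below are placeholder assumptions, not values from this project):
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                    milestones=[30, 60],
                                                    gamma=0.1)
# after each training epoch, call lr_scheduler.step() to apply the decay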
Example no. 9
label_dir = r'C:\CS8395_DLMIC\data\assignment1_data\labels'
label_txt = glob.glob(os.path.join(label_dir, '*els.txt'))

model = ResNet_PC().to(device)
torch.cuda.manual_seed(1)
model.apply(weights_init)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
criterion = nn.MSELoss().cuda()

with open(label_txt[0], 'r') as f:
    filenames = f.readlines()
filenames = [item.strip() for item in filenames]

transform = transforms.ToTensor()

train_dataset = Dataset(train_dir, filenames, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=4,
                                           shuffle=True)
# model = ResNet_PC().to(device)
# model.apply(weights_init)
if __name__ != "__main__":
    input = torch.rand([4, 3, 326, 490]).float().to(device)
    # print(input)
    # print(model)
    output = model(input)
    print("Output shape: ", output.shape)  #Output shape:  torch.Size([4, 2])

for tBatchIdx, sample in enumerate(train_loader):
    print("training...")
    # time_batch_load = time.time() - time_batch_start