Example #1
def train():
    dataset_train = get_train_data(100)
    dataset_valid = get_valid_data(100)

    model = DanQ()
    loss_object = keras.losses.BinaryCrossentropy()
    optimizer = keras.optimizers.Adam()
    trainer = Trainer(model=model,
                      loss_object=loss_object,
                      optimizer=optimizer,
                      experiment_dir='./result/DanQ')

    history = trainer.train(dataset_train,
                            dataset_valid,
                            epoch=60,
                            train_steps=int(np.ceil(4400000 / 100)),
                            valid_steps=int(np.ceil(8000 / 100)),
                            dis_show_bar=True)

    # Plot the training and validation loss curves, and save the loss values.
    print('\n history dict: ', history)
    epoch = history['epoch']
    train_loss = history['train_loss']
    val_loss = history['valid_loss']
    plot_loss_curve(epoch, train_loss, val_loss,
                    './result/DanQ/model_loss.jpg')
    np.savez('./result/DanQ/model_loss.npz',
             train_loss=train_loss,
             val_loss=val_loss)
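
The plot_loss_curve helper called above is project-specific and not shown in this listing. A minimal sketch of what it might look like, assuming the signature used in this example (epochs, train loss, validation loss, output path) and matplotlib:

def plot_loss_curve(epoch, train_loss, val_loss, save_path):
    # Hypothetical sketch: plot train/validation loss against epoch and save to disk.
    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(epoch, train_loss, label='train_loss')
    plt.plot(epoch, val_loss, label='valid_loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(save_path)
    plt.close()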
Example #2
def train():
    dataset_train = get_train_data(100)
    dataset_valid = get_valid_data(100)

    model = DanQ_JASPAR()
    # We need to build the model to initialize the weights because we define a custom build() method.
    model.build(input_shape=(None, 1000, 4))
    loss_object = keras.losses.BinaryCrossentropy()
    optimizer = keras.optimizers.Adam()
    trainer = Trainer(
        model=model,
        loss_object=loss_object,
        optimizer=optimizer,
        experiment_dir='./result/DanQ_JASPAR')

    history = trainer.train(dataset_train, dataset_valid, epoch=32, train_steps=int(np.ceil(4400000 / 100)),
                            valid_steps=int(np.ceil(8000 / 100)), dis_show_bar=True)

    # Plot the training and validation loss curves, and save the loss values.
    print('\n history dict: ', history)
    epoch = history['epoch']
    train_loss = history['train_loss']
    val_loss = history['valid_loss']
    plot_loss_curve(epoch, train_loss, val_loss, './result/DanQ_JASPAR/model_loss.jpg')
    np.savez('./result/DanQ_JASPAR/model_loss.npz', train_loss=train_loss, val_loss=val_loss)
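
The explicit model.build(input_shape=...) call is needed because a subclassed Keras model creates its weights lazily: before build() (or a first forward pass) there is nothing for the Trainer or a checkpoint to restore. A minimal sketch of that behaviour, with a hypothetical TinyModel standing in for DanQ_JASPAR:

import tensorflow as tf

class TinyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.dense(inputs)

model = TinyModel()
print(len(model.trainable_variables))     # 0: no weights exist yet
model.build(input_shape=(None, 1000, 4))  # triggers variable creation
print(len(model.trainable_variables))     # 2: kernel and bias of the Dense layer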
Example #3
def main(verbose=False, plot=False, save=False, random_ar_param=True):
    # load configuration dicts. Could be implemented to load from JSON instead.
    data_config, model_config, train_config = load_config(
        verbose, random_ar_param)
    # loads randomly generated data. Could be implemented to load a specific dataset instead.
    data = load_data(data_config, verbose, plot)
    # runs training and testing.
    results_dar, stats_dar = run_training(data, model_config, train_config,
                                          verbose)

    # optional printing
    if verbose:
        print(stats_dar)

    # optional plotting
    if plot:
        utils.plot_loss_curve(losses=results_dar["losses"],
                              test_loss=results_dar["test_mse"],
                              epoch_losses=results_dar["epoch_losses"],
                              show=False,
                              save=save)
        utils.plot_weights(model_config["ar"],
                           results_dar["weights"],
                           data["ar"],
                           model_name="AR-Net",
                           save=save)
        utils.plot_results(results_dar, model_name="AR-Net", save=save)
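
The comments above note that load_config could read from JSON instead of building the dicts in code. A minimal sketch of that variant, assuming a single hypothetical config.json holding the three sections:

import json

def load_config_from_json(path="config.json", verbose=False):
    # Hypothetical alternative to load_config(): read the three dicts from one JSON file.
    with open(path) as f:
        cfg = json.load(f)
    if verbose:
        print(json.dumps(cfg, indent=2))
    return cfg["data"], cfg["model"], cfg["train"]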
Example #4
def train():
    train_dataset = get_train_data(64)
    valid_data = get_valid_data()

    # Build the model.
    model = DeepSEA()
    model.compile(optimizer=tf.keras.optimizers.SGD(momentum=0.9),
                  loss=tf.keras.losses.BinaryCrossentropy())
    model.build(input_shape=(None, 1000, 4))
    model.summary()

    # Define the callbacks (check_pointer / early_stopper / tensor_boarder).
    # For check_pointer: with save_weights_only=True the callback writes weights-only
    # checkpoints (TF checkpoint format) of the best model at the end of each epoch,
    # rather than a full SavedModel.
    check_pointer = tf.keras.callbacks.ModelCheckpoint(
        filepath='./result/model/ckpt',
        verbose=0,
        save_best_only=True,
        save_weights_only=True,
        save_freq='epoch',
        load_weights_on_restart=False)
    early_stopper = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=5,
                                                     verbose=0)
    tensor_boarder = tf.keras.callbacks.TensorBoard(log_dir='./result/logs')

    # Training the model.
    history = model.fit(
        train_dataset,
        epochs=60,
        steps_per_epoch=int(np.ceil(4400000 / 64)),
        verbose=2,
        validation_data=valid_data,
        validation_steps=int(np.ceil(8000 / 64)),
        callbacks=[check_pointer, early_stopper, tensor_boarder])

    # Plot the training and validation loss curves, and save the loss values.
    print('\n history dict: ', history.history)
    train_loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_loss_curve(train_loss, val_loss, './result/model_loss.jpg')
    np.savez('./result/model_loss.npz',
             train_loss=train_loss,
             val_loss=val_loss)
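
Because check_pointer is configured with save_weights_only=True, restoring the best model later means rebuilding the model (and compiling it if you want to keep training or evaluating) before loading the weights. A minimal sketch, assuming the same DeepSEA class and checkpoint path as above:

def restore_best_model():
    # Hypothetical restore step for the weights-only checkpoint written during training.
    model = DeepSEA()
    model.compile(optimizer=tf.keras.optimizers.SGD(momentum=0.9),
                  loss=tf.keras.losses.BinaryCrossentropy())
    model.build(input_shape=(None, 1000, 4))
    model.load_weights('./result/model/ckpt')
    return model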
Example #5
def main(args):
    '''Load MNIST'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    save_dir = Path(args.result_dir)
    save_dir.mkdir(exist_ok=True)
    INPUT_SIZE = args.input_size
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(INPUT_SIZE[1:], padding=2),
        torchvision.transforms.ToTensor(),
    ])
    trn_dataset = torchvision.datasets.MNIST('.',
                                             train=True,
                                             download=True,
                                             transform=transforms)
    tst_dataset = torchvision.datasets.MNIST('.',
                                             train=False,
                                             download=True,
                                             transform=transforms)
    print('Images for training: %d' % len(trn_dataset))
    print('Images for testing: %d' % len(tst_dataset))

    BATCH_SIZE = args.batchsize  # Batch size not specified in the paper
    trn_loader = torch.utils.data.DataLoader(trn_dataset,
                                             BATCH_SIZE,
                                             shuffle=True)
    tst_loader = torch.utils.data.DataLoader(tst_dataset,
                                             BATCH_SIZE,
                                             shuffle=False)

    model = CapsNet(INPUT_SIZE).cuda()
    if args.pretrain is not None:
        print('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        model.load_state_dict(checkpoint['model_state_dict'])

    print(model)
    print('Number of Parameters: %d' % model.n_parameters())

    criterion = CapsNetLoss()

    LEARNING_RATE = args.learning_rate
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=LEARNING_RATE,
                                 betas=(0.9, 0.999),
                                 eps=1e-08)

    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    history = defaultdict(lambda: list())
    COMPUTE_TRN_METRICS = args.train_metric
    '''Training'''
    for epoch in range(int(args.epoch)):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))

        for batch_id, (x, y) in tqdm(enumerate(trn_loader),
                                     total=len(trn_loader),
                                     smoothing=0.9):
            optimizer = exponential_decay(
                optimizer, LEARNING_RATE, global_epoch, 1,
                0.90)  # Configurations not specified in the paper

            x = Variable(x).float().cuda()
            y = Variable(y).cuda()

            y_pred, x_reconstruction = model(x, y)
            loss, margin_loss, reconstruction_loss = criterion(
                x, y, x_reconstruction, y_pred.cuda())

            history['margin_loss'].append(margin_loss.cpu().data.numpy())
            history['reconstruction_loss'].append(
                reconstruction_loss.cpu().data.numpy())
            history['loss'].append(loss.cpu().data.numpy())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            global_step += 1

        trn_metrics = test(model, trn_loader) if COMPUTE_TRN_METRICS else None
        tst_metrics = test(model, tst_loader)

        print('Margin Loss: %f' % history['margin_loss'][-1])
        print('Reconstruction Loss: %f' % history['reconstruction_loss'][-1])
        print('Loss: %f' % history['loss'][-1])
        if COMPUTE_TRN_METRICS:
            print('Train Accuracy: %f' % (trn_metrics['accuracy']))
        print('Test Accuracy: %f' % tst_metrics['accuracy'])

        idx = np.random.randint(0, len(x))
        show_example(x[idx], y[idx], x_reconstruction[idx], y_pred[idx],
                     args.result_dir, 'Epoch_{}'.format(epoch))

        if (tst_metrics['accuracy'] >= best_tst_accuracy):
            best_tst_accuracy = tst_metrics['accuracy']
            save_checkpoint(
                global_epoch + 1,
                trn_metrics['accuracy'] if COMPUTE_TRN_METRICS else 0.0,
                tst_metrics['accuracy'], model, optimizer)
        global_epoch += 1

    n_points_avg = 10
    n_points_plot = 1000
    plt.figure(figsize=(20, 10))
    '''Loss Curve'''
    plot_loss_curve(history, n_points_avg, n_points_plot, args.result_dir)
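
exponential_decay() is a project helper that is not shown in this excerpt. A minimal sketch of what it might do, assuming it simply rescales the learning rate as init_lr * decay_rate ** (epoch / decay_steps) on every call:

def exponential_decay(optimizer, init_lr, epoch, decay_steps, decay_rate):
    # Hypothetical sketch: update the learning rate of every parameter group in place.
    lr = init_lr * decay_rate ** (epoch / decay_steps)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer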
Example #6
                    'optimizer': optimizer.state_dict(),
                }, os.path.join(opt.outf, 'model_best_test.pth.tar'))

        if epoch_item % 10 == 0:
            filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' % epoch_item)
            utils.save_checkpoint(
                {
                    'epoch_index': epoch_item,
                    'arch': opt.basemodel,
                    'state_dict': model.state_dict(),
                    'best_prec1_test': best_prec1_test,
                    'optimizer': optimizer.state_dict(),
                }, filename)

    # ======================================= Plot Loss Curves =======================================
    utils.plot_loss_curve(opt, Train_losses, Val_losses, Test_losses)
    print(
        '======================================== Training is END ========================================\n'
    )
    print(
        '======================================== Training is END ========================================\n',
        file=F_txt)
    F_txt.close()

    # ============================================ Test phase ============================================
    # Set the save path
    F_txt_test = utils.set_save_test_path(opt)
    print(
        '========================================== Start Test ==========================================\n'
    )
    print(
Example #7
def train(train_data, use_asymm_gen_loss=True, use_gpu=False):
    """

    :param train_data: np.ndarray of shape (20000, 1)
    :param use_asymm_gen_loss: bool
    :param use_gpu: bool
    :return:
    """
    """ Build training configurations """

    hp = dict(n_epochs=20, batch_size=64, n_disc_updates=2)
    hp = EasyDict(hp)

    constant = dict(device=torch.device("cpu" if not use_gpu else "cuda:0"))
    constant = EasyDict(constant)
    if use_gpu:
        torch.cuda.set_device(constant.device)
    """ Build data loader and data processor function """

    train_loader = data.DataLoader(dataset=train_data,
                                   batch_size=hp.batch_size,
                                   shuffle=True)
    """ Build networks """

    gen = Generator().to(constant.device)
    disc = Discriminator().to(constant.device)
    """ Build optimizers """

    optimizer_g = torch.optim.Adam(gen.parameters(), lr=1e-4, betas=(0, 0.9))
    optimizer_d = torch.optim.Adam(disc.parameters(), lr=1e-4, betas=(0, 0.9))
    """ Build loss functions """
    def disc_loss_fn(real, fake):
        return (-disc(real.detach()).log().mean() -
                (1 - disc(fake.detach())).log().mean())

    if use_asymm_gen_loss:

        def gen_loss_fn(real, fake):
            return -disc(fake).log().mean()

    else:

        def gen_loss_fn(real, fake):
            return (1 - disc(fake)).log().mean()

    """ Traning loop """

    history = dict(losses=[])
    for epoch in range(hp.n_epochs):

        losses_one_epoch = train_one_epoch(
            n_disc_updates=hp.n_disc_updates,
            batch_iterator=train_loader,
            process_batch_fn=process_batch_fn,
            gen=gen,
            disc=disc,
            optimizer_g=optimizer_g,
            optimizer_d=optimizer_d,
            gen_loss_fn=gen_loss_fn,
            disc_loss_fn=disc_loss_fn,
            device=constant.device,
            # max_n_iterations=1,  # uncomment this line if trying to debug
        )
        history["losses"].extend(losses_one_epoch)

        print(f"Epoch {epoch}: loss = {np.mean(losses_one_epoch)}")

        if epoch == 0 or epoch == hp.n_epochs - 1:
            fake, disc_in, disc_out = eval_one_epoch(gen=gen,
                                                     disc=disc,
                                                     device=constant.device)
            plot_eval(
                real=train_data,
                fake=fake,
                disc_in=disc_in,
                disc_out=disc_out,
                epoch=epoch,
            )

    history["losses"] = torch.stack(history["losses"]).to("cpu").numpy()

    plot_loss_curve(
        history["losses"],
        title="train_minimal",
        save_to=os.path.join(DATA_DIR, "train_minimal_loss"),
    )
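
process_batch_fn is passed to train_one_epoch but is not defined in this excerpt. A minimal sketch under the assumption that it only has to turn one DataLoader batch into a float tensor on the target device:

def process_batch_fn(batch, device):
    # Hypothetical sketch: the real function may do more (e.g. reshaping or normalization).
    if not torch.is_tensor(batch):
        batch = torch.as_tensor(batch)
    return batch.float().to(device)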
Example #8
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    MESH_SIZE = args.mesh_size
    BATCHSIZE = args.batchsize
    LEARNING_RATE = args.learning_rate
    INPUT_SIZE = (1, MESH_SIZE, MESH_SIZE, MESH_SIZE)
    EPOCH = args.epoch
    COMPUTE_TRAIN_METRICS = args.train_metric
    ROUNTING_ITER = args.n_routing_iter
    DATA_PATH = args.data_path
    NUM_CLASS = args.num_class
    DECAY_RATE = args.decay_rate
    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    result_dir = Path(args.result_dir)
    result_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path(args.log_dir)
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("PointCapsNet")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        args.log_dir + 'train-' +
        str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '--------------------------------------------------TRAINING--------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(DATA_PATH)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = myDataset(train_data,
                             train_label,
                             meshmode="density",
                             rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = myDataset(test_data,
                            test_label,
                            meshmode="density",
                            rotation=ROTATION)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=BATCHSIZE,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=BATCHSIZE,
                                                 shuffle=False)
    '''MODEL LOADING'''
    model = PointCapsNet(INPUT_SIZE, NUM_CLASS, ROUNTING_ITER).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    print(model)
    print('Number of Parameters: %d' % model.n_parameters())

    criterion = PointCapsNetLoss()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=LEARNING_RATE,
                                 betas=(0.9, 0.999),
                                 eps=1e-08)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    history = defaultdict(lambda: list())
    '''TRAINING'''
    logger.info('Start training...')
    total_train_acc = []
    total_test_acc = []
    for epoch in range(start_epoch, EPOCH):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, EPOCH))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, EPOCH)

        for batch_id, (x, y) in tqdm(enumerate(trainDataLoader),
                                     total=len(trainDataLoader),
                                     smoothing=0.9):
            optimizer = exponential_decay(optimizer, LEARNING_RATE,
                                          global_epoch, 1, DECAY_RATE)

            x = Variable(x).float().cuda()
            y = Variable(y.squeeze()).cuda()
            y_pred, x_reconstruction = model(x, y)

            loss, margin_loss, reconstruction_loss = criterion(
                x, y, x_reconstruction, y_pred.cuda(), NUM_CLASS)

            history['margin_loss'].append(margin_loss.cpu().data.numpy())
            history['reconstruction_loss'].append(
                reconstruction_loss.cpu().data.numpy())
            history['loss'].append(loss.cpu().data.numpy())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1

        train_metrics, train_hist_acc = test(
            model, trainDataLoader) if COMPUTE_TRAIN_METRICS else (None, [])
        test_metrics, test_hist_acc = test(model, testDataLoader)
        total_train_acc += train_hist_acc
        total_test_acc += test_hist_acc

        print('Margin Loss: %f' % history['margin_loss'][-1])
        logger.info('Margin Loss: %f', history['margin_loss'][-1])
        print('Reconstruction Loss: %f' % history['reconstruction_loss'][-1])
        logger.info('Reconstruction Loss: %f',
                    history['reconstruction_loss'][-1])
        print('Loss: %f' % history['loss'][-1])
        logger.info('Loss: %f', history['loss'][-1])
        if COMPUTE_TRAIN_METRICS:
            print('Train Accuracy: %f' % (train_metrics['accuracy']))
            logger.info('Train Accuracy: %f', (train_metrics['accuracy']))
        print('Test Accuracy: %f' % test_metrics['accuracy'])
        logger.info('Test Accuracy: %f', test_metrics['accuracy'])

        # TODO show reconstruction mesh
        # idx = np.random.randint(0, len(x))
        # show_example(x[idx], y[idx], x_reconstruction[idx], y_pred[idx], args.result_dir, 'Epoch_{}'.format(epoch))

        if (test_metrics['accuracy'] >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = test_metrics['accuracy']
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_metrics['accuracy'] if COMPUTE_TRAIN_METRICS else 0.0,
                test_metrics['accuracy'], model, optimizer,
                str(checkpoints_dir))
        global_epoch += 1

    logger.info('End of training...')
    n_points_avg = 10
    n_points_plot = 1000
    plt.figure(figsize=(20, 10))
    plot_loss_curve(history, n_points_avg, n_points_plot, str(result_dir))
    plot_acc_curve(total_train_acc, total_test_acc, str(result_dir))
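
save_checkpoint() is defined elsewhere in the project; its keys have to line up with the args.pretrain loading code near the top of main (checkpoint['epoch'], checkpoint['model_state_dict']). A minimal sketch matching the call above:

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer, path):
    # Hypothetical sketch: bundle the training state into one file per checkpoint.
    state = {
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, os.path.join(path, 'checkpoint_epoch_%d.pth' % epoch))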
Example #9
json_str3 = json.dumps(tr_los_dict, indent=4)
json_str4 = json.dumps(te_los_dict, indent=4)
if not os.path.exists('results'):
    os.mkdir('results')
with open('results/tr_acc_epoch.json', 'w') as json_file:
    json_file.write(json_str1)
with open('results/te_acc_epoch.json', 'w') as json_file:
    json_file.write(json_str2)
with open('results/tr_los_epoch.json', 'w') as json_file:
    json_file.write(json_str3)
with open('results/te_los_epoch.json', 'w') as json_file:
    json_file.write(json_str4)

# plot the accuracy and loss figures
plot_accuracy_curve(train_accuracy, test_accuracy, show=True)
plot_loss_curve(train_loss, test_loss, show=True)
# plot confusion matrix of the predicted results of the latest model
net.eval()
predict_list = np.array([]).astype(int)
label_list = np.array([]).astype(int)
with torch.no_grad():
    for images, labels in test_loader:
        images = Variable(images.to(device))
        labels = Variable(labels.to(device))
        outputs = net(images)
        predict_y = torch.max(outputs, dim=1)[1]
        predict_list = np.hstack((predict_list, predict_y.cpu().numpy()))
        label_list = np.hstack((label_list, labels.cpu().numpy()))
cm = get_confusion_matrix(classes, predict_list, label_list)
plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=True)
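
get_confusion_matrix() and plot_confusion_matrix() come from the surrounding project. A minimal numpy sketch of the former, assuming classes is the list of class names and rows index the true labels:

def get_confusion_matrix(classes, predictions, labels):
    # Hypothetical sketch: rows are true classes, columns are predicted classes.
    cm = np.zeros((len(classes), len(classes)), dtype=int)
    for pred, true in zip(predictions, labels):
        cm[true, pred] += 1
    return cm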
Example #10
def train(train_data, use_gpu=False):
    """

    :param train_data: np.ndarray of shape (N, 3, 32, 32) with values in [0, 1]
    :param use_gpu: bool
    :return:
    """

    """ Build training configurations """

    hp = dict(
        n_iterations=25000,
        batch_size=256,
        n_disc_updates=5,
        lmbda=10,
    )
    hp = EasyDict(hp)

    constant = dict(device=torch.device("cpu" if not use_gpu else "cuda:0"))
    constant = EasyDict(constant)
    if use_gpu:
        torch.cuda.set_device(constant.device)

    """ Build data loader and data processor function """

    train_loader = data.DataLoader(dataset=train_data, batch_size=hp.batch_size, shuffle=True)
    n_batches = len(train_loader)
    hp.n_epochs = hp.n_iterations // n_batches
    hp.n_iterations = hp.n_epochs * n_batches
    print('n_epochs', hp.n_epochs, 'n_iterations', hp.n_iterations)

    """ Build networks """

    gen = Generator().to(constant.device)
    disc = Discriminator().to(constant.device)

    """ Build optimizers """

    optimizer_g = torch.optim.Adam(gen.parameters(), lr=2e-4, betas=(0, 0.9))
    optimizer_d = torch.optim.Adam(disc.parameters(), lr=2e-4, betas=(0, 0.9))

    """ Build loss functions """

    def disc_loss_fn(real, fake):
        current_batch_size = real.shape[0]
        real, fake = real.detach(), fake.detach()
        eps = torch.randn(current_batch_size, 1, 1, 1).to(constant.device)
        x_hat = (eps * real + (1 - eps) * fake).requires_grad_()

        disc_out = disc(x_hat)
        original_disc_loss = disc_out.mean() - disc(real).mean()

        grad, = torch.autograd.grad(
            outputs=[disc_out.mean(), ],
            inputs=x_hat,
            create_graph=True,
            retain_graph=True,
        )
        grad_penalty = (grad.norm() - 1).square()
        return original_disc_loss + hp.lmbda * grad_penalty

    def gen_loss_fn(real, fake):
        return -disc(fake).log().mean()

    """ Build learning rate schedulers """

    max_n_iterations = max(hp.n_iterations, 25000)
    scheduler_g = torch.optim.lr_scheduler.LambdaLR(
        optimizer=optimizer_g,
        lr_lambda=lambda itr: (max_n_iterations - itr) / max_n_iterations,
        last_epoch=-1
    )
    scheduler_d = torch.optim.lr_scheduler.LambdaLR(
        optimizer=optimizer_d,
        lr_lambda=lambda itr: (max_n_iterations - itr) / max_n_iterations,
        last_epoch=-1,
    )

    """ Training loop """

    history = dict(
        losses=[],
    )

    for epoch in tqdm(range(hp.n_epochs)):
        losses_one_epoch = train_one_epoch(
            n_disc_updates=hp.n_disc_updates,
            batch_iterator=train_loader,
            process_batch_fn=process_batch_fn,
            gen=gen, disc=disc, optimizer_g=optimizer_g, optimizer_d=optimizer_d,
            gen_loss_fn=gen_loss_fn,
            disc_loss_fn=disc_loss_fn,
            device=constant.device,
            scheduler_g=scheduler_g, scheduler_d=scheduler_d,
            # max_n_iterations=1,  # debug
        )
        history['losses'].extend(losses_one_epoch)

        print(f"Epoch {epoch}: loss = {np.mean(losses_one_epoch)}")

        if epoch == hp.n_epochs - 1:
            fake = eval_one_epoch(gen=gen, disc=disc, device=constant.device)
            plot_eval(fake=fake, epoch=epoch)

    history['losses'] = torch.stack(history['losses']).to('cpu').numpy()

    plot_loss_curve(
        history["losses"],
        title="train_cifar10",
        save_to=os.path.join(DATA_DIR, "train_cifar10_loss"),
    )
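
For comparison: the gradient penalty in the original WGAN-GP formulation (Gulrajani et al., 2017) interpolates real and fake samples with a uniform coefficient and penalizes the gradient norm per sample rather than over the whole batch. A sketch of that canonical form, kept separate from the loss functions used above:

def gradient_penalty(disc, real, fake, device, lmbda=10.0):
    # Canonical WGAN-GP penalty, shown for reference only.
    batch_size = real.shape[0]
    eps = torch.rand(batch_size, 1, 1, 1, device=device)   # uniform in [0, 1)
    x_hat = (eps * real.detach() + (1 - eps) * fake.detach()).requires_grad_()
    disc_out = disc(x_hat)
    grad, = torch.autograd.grad(outputs=disc_out.sum(), inputs=x_hat,
                                create_graph=True, retain_graph=True)
    grad_norm = grad.view(batch_size, -1).norm(2, dim=1)    # per-sample gradient norm
    return lmbda * ((grad_norm - 1) ** 2).mean()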