Example #1
def main():
    output_path = "/tmp/rossmann"

    preprocessed_train_path = os.path.join(output_path, 'joined.feather')
    preprocessed_test_path = os.path.join(output_path, 'joined_test.feather')
    WebFetcher.download_dataset(
        "https://f002.backblazeb2.com/file/torchlite-data/rossmann.tgz",
        output_path, True)
    if os.path.exists(preprocessed_train_path) and os.path.exists(
            preprocessed_test_path):
        train_df = pd.read_feather(preprocessed_train_path,
                                   nthreads=cpu_count())
        test_df = pd.read_feather(preprocessed_test_path, nthreads=cpu_count())
    else:
        train_df, test_df = prepare_data(output_path, preprocessed_train_path,
                                         preprocessed_test_path)

    train_df, test_df, cat_vars, card_cat_features = create_features(
        train_df, test_df)

    # -- Model parameters
    batch_size = 256
    epochs = 20

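    # Sales are modeled in log space (Sales_log); allow the output range to go slightly above the observed max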
    max_log_y = np.max(train_df['Sales_log'].values)
    y_range = (0, max_log_y * 1.2)

    # /!\ Uncomment this to get a real validation set
    #train_df, val_df = tsplitter.time_split(train_df, datetime.datetime(2014, 8, 1), datetime.datetime(2014, 9, 17))
    val_df = None
    # --

    shortcut = shortcuts.ColumnarShortcut.from_data_frames(
        train_df, val_df, "Sales_log", cat_vars, batch_size, test_df)
    model = shortcut.get_stationary_model(card_cat_features,
                                          len(train_df.columns) -
                                          len(cat_vars),
                                          output_size=1,
                                          emb_drop=0.04,
                                          hidden_sizes=[1000, 500],
                                          hidden_dropouts=[0.001, 0.01],
                                          y_range=y_range)
    optimizer = optim.Adam(model.parameters())
    learner = Learner(ClassifierCore(model, optimizer, F.mse_loss))
    learner.train(epochs, [metrics.RMSPE(to_exp=True)],
                  shortcut.get_train_loader,
                  shortcut.get_val_loader,
                  callbacks=[CosineAnnealingCallback(optimizer, T_max=epochs)])
    test_pred = learner.predict(shortcut.get_test_loader)
    test_pred = np.exp(test_pred)

    # Save the predictions as a CSV file
    sub_file_path = os.path.join(output_path, "submit.csv")
    to_csv(preprocessed_test_path,
           sub_file_path,
           'Id',
           'Sales',
           test_pred,
           read_format='feather')
    print("Predictions saved to {}".format(sub_file_path))
Example #2
def srpgan_eval(images, generator_file, upscale_factor, use_cuda, num_workers=os.cpu_count()):
    """
    Turn a list of images to super resolution and returns them
    Args:
        num_workers (int): Number of processors to use
        use_cuda (bool): Whether or not to use the GPU
        upscale_factor (int): Either 2, 4 or 8
        images (list): List of Pillow images
        generator_file (file): The generator saved model file

    Returns:
        list: A list of SR images
    """
    netG = Generator(upscale_factor)
    learner = Learner(ClassifierCore(netG, None, None), use_cuda=use_cuda)
    ModelSaverCallback.restore_model_from_file(netG, generator_file, load_with_cpu=not use_cuda)
    eval_ds = EvalDataset(images)
    # One batch at a time as the pictures may differ in size
    eval_dl = DataLoader(eval_ds, 1, shuffle=False, num_workers=num_workers)

    images_pred = []
    predictions = learner.predict(eval_dl, flatten_predictions=False)
    tfs = transforms.Compose([
        transforms.ToPILImage(),
    ])
    for pred in predictions:
        pred = pred.view(pred.size()[1:])  # Drop the batch dimension (batch size is 1)
        images_pred.append(tfs(pred.cpu()))

    return images_pred
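
A minimal usage sketch for srpgan_eval; the image paths and checkpoint file below are hypothetical placeholders:

from PIL import Image

lr_images = [Image.open(p).convert("RGB") for p in ("lr_0.png", "lr_1.png")]
sr_images = srpgan_eval(lr_images, "models/Generator.pth",
                        upscale_factor=4, use_cuda=False)
for i, img in enumerate(sr_images):
    img.save("sr_{}.png".format(i))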
Example #3
def train(args):
    num_workers = os.cpu_count()
    train_loader, valid_loader = get_loaders(args, num_workers)

    model_saver = ModelSaverCallback(saved_model_dir.absolute(), args.adv_epochs, every_n_epoch=10)

    netG = Generator(args.upscale_factor)
    netG.apply(weights_init)
    netD = Discriminator((3, args.crop_size, args.crop_size))
    netD.apply(weights_init)
    optimizer_g = optim.Adam(netG.parameters(), lr=1e-4)
    optimizer_d = optim.Adam(netD.parameters(), lr=1e-4)

    # Restore the models if they exist
    if args.restore_models == 1:
        model_saver.restore_models([netG, netD], saved_model_dir.absolute())
    else:
        if args.gen_epochs > 0:
            print("---------------------- Generator training ----------------------")
            callbacks = [ReduceLROnPlateau(optimizer_g, loss_step="train")]
            loss = nn.MSELoss()
            learner = Learner(ClassifierCore(netG, optimizer_g, loss))
            learner.train(args.gen_epochs, None, train_loader, None, callbacks)

    print("----------------- Adversarial (SRPGAN) training -----------------")
    callbacks = [model_saver, ReduceLROnPlateau(optimizer_g, loss_step="valid"),
                 TensorboardVisualizerCallback(tensorboard_dir.absolute())]

    g_loss = GeneratorLoss()
    d_loss = DiscriminatorLoss()
    learner = Learner(SRPGanCore(netG, netD, optimizer_g, optimizer_d, g_loss, d_loss))
    learner.train(args.adv_epochs, [SSIM(), PSNR()], train_loader, valid_loader, callbacks)
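
A sketch of the command-line interface this train() implies. Only the attribute names are taken from the function body above; the defaults are illustrative assumptions, and get_loaders may read further fields (e.g. a batch size) not shown here:

import argparse

parser = argparse.ArgumentParser(description="SRPGAN training")
parser.add_argument("--upscale_factor", type=int, default=4)
parser.add_argument("--crop_size", type=int, default=96)
parser.add_argument("--gen_epochs", type=int, default=2)
parser.add_argument("--adv_epochs", type=int, default=100)
parser.add_argument("--restore_models", type=int, default=0)
train(parser.parse_args())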
Example #4
def main():
    batch_size = 512
    epochs = 2
    root_dir = "/tmp"
    # TODO in the future use https://quiltdata.com/
    fetcher.WebFetcher.download_dataset(
        "https://f002.backblazeb2.com/file/torchlite-data/dogscats.zip",
        root_dir, True)
    root_dir = "/tmp/dogscats"
    root_dir = Path(root_dir)
    train_folder = root_dir / "train"
    val_folder = root_dir / "valid"
    test_folder = root_dir / "test"
    test_image_name = "12500.jpg"

    # Image augmentation/transformations
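    # The mean/std values below are the standard ImageNet normalization statistics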
    transformations = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    shortcut = ImageClassifierShortcut.from_paths(
        train_folder=train_folder.absolute(),
        val_folder=val_folder.absolute(),
        test_folder=test_folder.absolute(),
        transforms=transformations,
        batch_size=batch_size)

    net = shortcut.get_resnet_model()
    # Don't optimize the frozen layers' parameters of the resnet
    optimizer = optim.RMSprop(filter(lambda p: p.requires_grad,
                                     net.parameters()),
                              lr=1e-3)
    loss = F.nll_loss
    metrics = [CategoricalAccuracy()]
    grad_cam_callback = ActivationMapVisualizerCallback(
        test_image_name)  # TODO finish grad_cam++ here

    learner = Learner(ClassifierCore(net, optimizer, loss))
    learner.train(epochs, metrics, shortcut.get_train_loader,
                  shortcut.get_val_loader)

    y_mapping = shortcut.get_y_mapping
    y_pred = learner.predict(shortcut.get_test_loader,
                             callbacks=[grad_cam_callback])
    heatmap = grad_cam_callback.get_heatmap
    show_test_image(test_image_name, shortcut, y_mapping, y_pred)
Example #5
def main():
    batch_size = 128
    epochs = 20
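    # (0.1307,) and (0.3081,) below are the global mean/std of the MNIST training images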
    mnist_train_data = datasets.MNIST('/tmp/data',
                                      train=True,
                                      download=True,
                                      transform=transforms.Compose([
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.1307, ),
                                                               (0.3081, ))
                                      ]))
    train_loader = DataLoader(mnist_train_data,
                              batch_size,
                              shuffle=True,
                              num_workers=os.cpu_count())

    mnist_test_data = datasets.MNIST('/tmp/data',
                                     train=False,
                                     transform=transforms.Compose([
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.1307, ),
                                                              (0.3081, ))
                                     ]))
    test_loader = DataLoader(mnist_test_data,
                             batch_size,
                             shuffle=False,
                             num_workers=os.cpu_count())

    net = Net()
    optimizer = optim.RMSprop(net.parameters(), lr=1e-3)
    loss = F.nll_loss

    learner = Learner(ClassifierCore(net, optimizer, loss))
    metrics = [CategoricalAccuracy()]

    learner.train(epochs, metrics, train_loader, test_loader, callbacks=None)
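
Net is defined elsewhere in the original script. Because the loss is F.nll_loss, the model must output log-probabilities; a minimal stand-in that respects this contract (a hypothetical sketch, not the original architecture) could be:

import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))  # 1x28x28 -> 10x12x12
        x = F.relu(F.max_pool2d(self.conv2(x), 2))  # -> 20x4x4
        x = x.view(-1, 320)                         # flatten 20*4*4
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)    # log-probs for nll_loss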
Example #6
class MyReduceLROnPlateau(ReduceLROnPlateau):
    def on_epoch_end(self, epoch, logs=None):
        step = logs["step"]
        if step == 'validation':
            batch_logs = logs.get('batch_logs', {})
            epoch_loss = batch_logs.get('loss')
            if epoch_loss is not None:
                print('reduce lr num_bad_epochs: ', self.lr_sch.num_bad_epochs)
                self.lr_sch.step(epoch_loss, epoch)
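
This subclass steps the underlying torch scheduler only at the end of the validation pass, feeding it the epoch's validation loss, so the learning rate is halved (factor=0.5) after patience epochs without improvement on the validation set.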


callbacks = [
    ModelParamsLogger(),
    TensorboardVisualizerCallback(tb_dir),
    ModelSaverCallback(MODELS_DIR,
                       epochs=args.epoch,
                       every_n_epoch=args.save_every),
    MyReduceLROnPlateau(optimizer,
                        loss_step="valid",
                        factor=0.5,
                        verbose=True,
                        patience=args.patience)
]

learner = Learner(ClassifierCore(model, optimizer, loss), use_cuda=args.cuda)
learner.train(args.epoch,
              metrics,
              train_loader,
              test_loader,
              callbacks=callbacks)
Example #7
from PIL import Image
from torchlite.data.datasets.srpgan import EvalDataset
from torchlite.torch.models.srpgan import Generator
from torchlite.torch.learner import Learner
from torchlite.torch.learner.cores import ClassifierCore
from torchlite.torch.train_callbacks import ModelSaverCallback
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

cur_path = os.path.dirname(os.path.abspath(__file__))
# Create the models directory if it does not exist yet
# (stdlib replacement for the unimported efiles helper)
saved_model_dir = os.path.join(cur_path, "models")
os.makedirs(saved_model_dir, exist_ok=True)

netG = Generator(4)
learner = Learner(ClassifierCore(netG, None, None), use_cuda=True)
ModelSaverCallback.restore_model_from_file(
    netG,
    os.path.join(saved_model_dir, "Generator.pth"),
    load_with_cpu=False)


def evaluate(imorig):
    """
    Method used for inference only
    """
    img_shape = imorig.shape
    # Split the image into full 200px bands along the height;
    # crop_h is the leftover strip that does not fill a full band
    h = 200
    num_h = img_shape[0] // h
    ori_h = h * num_h
    crop_h = img_shape[0] - ori_h
    w = 200