Example #1
def simple_img_classifier_train(
    model, dts, batch_size=512, epochs=25, lr=0.1, lr_step=11, weight_decay=0.0001
):
    train_dts = dts.get_train_dataset()
    val_dts = dts.get_val_dataset()

    p_c = torch.nn.DataParallel(model).cuda()
    criterion = losses.CrossEntropyLoss()
    optim = torch_optim.SGD(
        model.parameters(), lr, 0.9, weight_decay=weight_decay, nesterov=True
    )

    @ev_batch_to_images_labels
    def ev(_images, _labels):
        out = p_c(_images)
        logits = model.out_to_logits(out)
        PT(logits=logits)
        loss = PT(loss=criterion(logits, _labels))

    nt = NiceTrainer(
        ev,
        dts.get_loader(train_dts, batch_size),
        optim,
        printable_vars=["loss", "acc"],
        computed_variables={"acc": accuracy_calc_op()},
        lr_step_period=lr_step,
        val_dts=dts.get_loader(val_dts, batch_size),
    )

    for e in range(epochs):
        nt.train()
    nt.validate()
Example #2
    def define_loss(self, dataset):
        if self.output_type == "regression":
            self.criterion = nnloss.MSELoss()
        else:
            if self.weighted_loss:
                num_samples = len(dataset)

                # distribution of classes in the dataset
                label_to_count = {n: 0 for n in range(self.number_classes)}
                for idx in range(num_samples):
                    label = dataset.load_datapoint(idx)[-1]
                    label_to_count[label] += 1

                label_percentage = {
                    l: label_to_count[l] / num_samples for l in label_to_count.keys()
                }
                median_perc = median(list(label_percentage.values()))
                class_weights = [
                    median_perc / label_percentage[c] if label_percentage[c] != 0 else 0
                    for c in range(self.number_classes)
                ]
                weights = torch.FloatTensor(class_weights).to(self.device)

            else:
                weights = None

            if self.classification_loss_type == 'cross-entropy':
                self.criterion = nnloss.CrossEntropyLoss(weight=weights)
            else:
                if weights is not None:
                    raise NotImplementedError("weighted loss is only supported with cross-entropy")
                self.criterion = log_f1_micro_loss
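
The weighted branch above implements median-frequency balancing: each class is weighted by the median class frequency divided by its own frequency, so rare classes are up-weighted. A self-contained sketch of the same computation, with a made-up label distribution:

from statistics import median

import torch
from torch.nn.modules import loss as nnloss

# hypothetical counts over 4 classes; class 3 is rare
label_to_count = {0: 50, 1: 25, 2: 20, 3: 5}
num_samples = sum(label_to_count.values())

label_percentage = {l: c / num_samples for l, c in label_to_count.items()}
median_perc = median(label_percentage.values())
class_weights = [
    median_perc / label_percentage[c] if label_percentage[c] != 0 else 0
    for c in range(4)
]  # -> [0.45, 0.9, 1.125, 4.5]; the rare class gets the largest weight

criterion = nnloss.CrossEntropyLoss(weight=torch.FloatTensor(class_weights))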
Example #3
def get_criterion(name, **kwargs):
    """
    Returns criterion by name.

    :param name: criterion name (str)
    :param kwargs: kwargs passed to criterion constructor.
    :return: corresponding criterion from torch.nn module.
    """
    # map names to classes and instantiate only the selected criterion,
    # so the kwargs documented above actually reach its constructor
    return {
        'bce': loss.BCELoss,
        'bcewithlogits': loss.BCEWithLogitsLoss,
        'cosineembedding': loss.CosineEmbeddingLoss,
        'crossentropy': loss.CrossEntropyLoss,
        'hingeembedding': loss.HingeEmbeddingLoss,
        'kldiv': loss.KLDivLoss,
        'l1': loss.L1Loss,
        'mse': loss.MSELoss,
        'marginranking': loss.MarginRankingLoss,
        'multilabelmargin': loss.MultiLabelMarginLoss,
        'multilabelsoftmargin': loss.MultiLabelSoftMarginLoss,
        'multimargin': loss.MultiMarginLoss,
        'nll': loss.NLLLoss,
        'nll2d': loss.NLLLoss2d,  # deprecated in recent PyTorch
        'poissonnll': loss.PoissonNLLLoss,
        'smoothl1': loss.SmoothL1Loss,
        'softmargin': loss.SoftMarginLoss,
        'tripletmargin': loss.TripletMarginLoss
    }[name.strip().lower()](**kwargs)
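
For reference, a minimal usage sketch for the factory above; the lookup strips and lower-cases the name, and the dummy logits and targets are purely illustrative:

import torch

criterion = get_criterion(' CrossEntropy ')  # resolves to loss.CrossEntropyLoss()
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
print(criterion(logits, targets))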
Example #4
def loss_fn(loss_fn: str = "mse"):
    """
    :param loss_fn: name of the loss function to use for training
    :return: loss function module (instance)
    """
    if loss_fn == "mse":
        return loss.MSELoss()
    elif loss_fn == "L1":
        return loss.L1Loss()
    elif loss_fn == "neg_pearson":
        return NegPearsonLoss()
    elif loss_fn == "multi_margin":
        return loss.MultiMarginLoss()
    elif loss_fn == "bce":
        return loss.BCELoss()
    elif loss_fn == "huber":
        return loss.HuberLoss()
    elif loss_fn == "cosine_embedding":
        return loss.CosineEmbeddingLoss()
    elif loss_fn == "cross_entropy":
        return loss.CrossEntropyLoss()
    elif loss_fn == "ctc":
        return loss.CTCLoss()
    elif loss_fn == "bce_with_logits":
        return loss.BCEWithLogitsLoss()
    elif loss_fn == "gaussian_nll":
        return loss.GaussianNLLLoss()
    elif loss_fn == "hinge_embedding":
        return loss.HingeEmbeddingLoss()
    elif loss_fn == "KLDiv":
        return loss.KLDivLoss()
    elif loss_fn == "margin_ranking":
        return loss.MarginRankingLoss()
    elif loss_fn == "multi_label_margin":
        return loss.MultiLabelMarginLoss()
    elif loss_fn == "multi_label_soft_margin":
        return loss.MultiLabelSoftMarginLoss()
    elif loss_fn == "nll":
        return loss.NLLLoss()
    elif loss_fn == "nll2d":
        return loss.NLLLoss2d()
    elif loss_fn == "pairwise":
        return loss.PairwiseDistance()
    elif loss_fn == "poisson_nll":
        return loss.PoissonNLLLoss()
    elif loss_fn == "smooth_l1":
        return loss.SmoothL1Loss()
    elif loss_fn == "soft_margin":
        return loss.SoftMarginLoss()
    elif loss_fn == "triplet_margin":
        return loss.TripletMarginLoss()
    elif loss_fn == "triplet_margin_distance":
        return loss.TripletMarginWithDistanceLoss()
    else:
        log_warning("use implemented loss functions")
        raise NotImplementedError(
            "implement a custom function(%s) in loss.py" % loss_fn)
Example #5
    def __init__(
        self,
        cfg: DictConfig,
        log: Logger,
        train_loader: DataLoader,
        test_loader: DataLoader,
    ) -> None:
        """
        Orchestrates training process by config
        :param cfg: configuration file in omegaconf format from hydra
        :param Log: Logger instance
        :param train_loader: PyTorch DataLoader over training set
        :param test_loader: PyTorch DataLoader over validation set
        """
        self.log = log
        self.cfg = cfg
        self.train_loader = train_loader
        self.test_loader = test_loader

        # Set device
        self.device = (torch.device("cuda")
                       if torch.cuda.is_available() else torch.device("cpu"))
        self.log.info(f"Using device={self.device}")

        # Set model
        self.model = torch_model(
            self.cfg.model.arch,
            self.cfg.data.classes,
            self.cfg.model.pretrained,
            log,
            module_name=self.cfg.model.module,
        )
        self.model = self.model.to(self.device)

        # Set optimizer
        parameters = (self.cfg.optimizer.parameters if "parameters"
                      in self.cfg.optimizer else {})  # keep defaults
        self.optimizer = torch_optimizer(self.cfg.optimizer.name,
                                         self.model.parameters(), self.log,
                                         **parameters)

        # Set scheduler
        self.scheduler = None
        if "scheduler" in self.cfg:
            parameters = (self.cfg.scheduler.parameters if "parameters"
                          in self.cfg.scheduler else {})  # keep defaults
            self.scheduler = torch_scheduler(self.cfg.scheduler.name,
                                             self.optimizer, self.log,
                                             **parameters)
        if self.scheduler is None:
            self.log.info("Scheduler not specified. Proceed without")

        # Set loss function
        self.loss_function = loss.CrossEntropyLoss()
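
The constructor reads a hydra/omegaconf config; a minimal illustrative shape, with the node names taken from the attribute accesses above and all values made up:

from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "model": {"arch": "resnet18", "pretrained": True, "module": "torchvision.models"},
    "data": {"classes": 200},
    "optimizer": {"name": "sgd", "parameters": {"lr": 0.1, "momentum": 0.9}},
    # the scheduler node is optional; without it the trainer proceeds unscheduled
    "scheduler": {"name": "steplr", "parameters": {"step_size": 10}},
})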
Example #6
def get_task_specific_losses(source_labels,
                             source_task=None,
                             transfer_task=None):
    """
    Losses related to the task-classifier
    """
    # source_labels = source_labels.view(params.batch_size, 1)
    # one_hot_labels = torch.zeros(params.batch_size, params.num_classes).scatter_(1, source_labels.data, 1)

    task_specific_loss = 0
    if source_task is not None:
        source_criterion = loss.CrossEntropyLoss()
        source_loss = source_criterion(source_task, source_labels)
        task_specific_loss += source_loss
    if transfer_task is not None:
        transfer_criterion = loss.CrossEntropyLoss()
        transfer_loss = transfer_criterion(transfer_task, source_labels)
        task_specific_loss += transfer_loss

    return task_specific_loss
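
A call sketch with dummy logits; both classifier heads are scored against the same source labels, and either head may be omitted:

import torch

labels = torch.randint(0, 5, (8,))
source_logits = torch.randn(8, 5)
transfer_logits = torch.randn(8, 5)

total = get_task_specific_losses(labels,
                                 source_task=source_logits,
                                 transfer_task=transfer_logits)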
Example #7
    def __init__(self,
                 model: Module,
                 train_loader: DataLoader,
                 test_loader: DataLoader,
                 device=DEFAULT_DEVICE,
                 lr=DEFAULT_LR,
                 momentum=DEFAULT_MOMENTUM,
                 epochs=DEFAULT_EPOCHS,
                 batch_size=DEFAULT_BATCH_SIZE,
                 parallelism=DEFAULT_PARALLELISM,
                 milestones=MILESTONES,
                 gamma=0.2,
                 warm_phases=WARM_PHASES,
                 criterion=loss.CrossEntropyLoss()):
        print("initialize trainer")
        # parameter pre-processing
        self.test_loader = test_loader

        if torch.cuda.device_count() > 1 and parallelism:
            print(f"using {torch.cuda.device_count()} GPUs")
            self.model = nn.DataParallel(model)
        else:
            self.model = model
        self.model.to(device)

        optimizer = optim.SGD(
            # only optimize parameters that still require gradients
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=lr,
            momentum=momentum,
            weight_decay=5e-4)

        train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                         milestones=milestones,
                                                         gamma=gamma)

        # warm phases
        self.warm_phases = warm_phases
        # warmup learning rate
        self.warmup_scheduler = WarmUpLR(optimizer,
                                         len(train_loader) * self.warm_phases)

        self.hp = HyperParameter(scheduler=train_scheduler,
                                 optimizer=optimizer,
                                 criterion=criterion,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 device=device)

        self.train_loader = train_loader
        print("initialize finished")
        print(f"hyper parameter: {self.hp}")
Example #8
    def __init__(self, args):
        input_size = (args.input_size, args.input_size)

        self.run_name = args.run_name
        self.input_size = input_size
        self.lr = args.learning_rate
        self.output_type = args.output_type

        network_architecture_class = InceptionSiameseNetwork
        network_architecture_transforms = get_pretrained_iv3_transforms
        if args.model_type == "light":
            network_architecture_class = LightSiameseNetwork
            network_architecture_transforms = get_light_siamese_transforms

        # define the loss measure
        if self.output_type == "regression":
            self.criterion = nnloss.MSELoss()
            self.model = network_architecture_class()
        elif self.output_type == "classification":
            self.criterion = nnloss.CrossEntropyLoss()
            self.n_classes = 4  # replace by args
            self.model = network_architecture_class(
                output_type=self.output_type, n_classes=self.n_classes
            )

        self.transforms = {}

        if torch.cuda.device_count() > 1:
            logger.info("Using {} GPUs".format(torch.cuda.device_count()))
            self.model = torch.nn.DataParallel(self.model)

        for s in ("train", "validation", "test", "inference"):
            self.transforms[s] = network_architecture_transforms(s)

        logger.debug("Num params: {}".format(len([_ for _ in self.model.parameters()])))

        self.optimizer = Adam(self.model.parameters(), lr=self.lr)
        # reduces the learning rate when loss plateaus, i.e. doesn't improve
        self.lr_scheduler = ReduceLROnPlateau(
            self.optimizer, factor=0.1, patience=10, min_lr=1e-5, verbose=True
        )
        # creates tracking file for tensorboard
        self.writer = SummaryWriter(args.checkpoint_path)

        self.device = args.device
        self.model_path = args.model_path
        self.prediction_path = args.prediction_path
        self.model_type = args.model_type
        self.is_statistical_model = args.statistical_model
        self.is_neural_model = args.neural_model
        self.log_step = args.log_step
Example #9
    def __init__(self, args):
        train_config = args.outputType
        net_config = args.networkType
        n_freeze = args.numFreeze
        input_size = (args.inputSize, args.inputSize)

        assert train_config in ("soft-targets", "softmax")
        assert net_config in ("pre-trained", "full")
        self.train_config = train_config
        self.input_size = input_size
        self.lr = args.learningRate

        if train_config == "soft-targets":
            self.n_classes = 1
            self.criterion = nnloss.BCEWithLogitsLoss()
        else:
            # TODO: weights
            self.n_classes = 4
            self.criterion = nnloss.CrossEntropyLoss()

        self.transforms = {}
        if net_config == "pre-trained":
            self.model = SiameseNetwork(self.n_classes, n_freeze=n_freeze)

            for s in ("train", "val", "test"):
                self.transforms[s] = get_pretrained_iv3_transforms(s)

        else:
            self.model = build_net(input_size, self.n_classes)
            assert input_size[0] == input_size[1]
            for s in ("train", "val", "test"):
                self.transforms[s] = get_transforms(s, input_size[0])

        log.debug("Num params: {}".format(
            len([_ for _ in self.model.parameters()])))

        self.optimizer = Adam(self.model.parameters(), lr=self.lr)
        self.lr_scheduler = ReduceLROnPlateau(self.optimizer,
                                              factor=0.1,
                                              patience=10,
                                              min_lr=1e-5,
                                              verbose=True)
Example #10
    def __init__(self, encoder_params, bn_params, decoder_params):
        super().__init__()  # nn.Module.__init__ takes no extra arguments and must be called

        self.encoder = enc.Encoder(**encoder_params)

        bn_type = bn_params.pop('type')

        if bn_type == 'vqvae':
            self.bottleneck = bn.VQVAE(**bn_params)
        elif bn_type == 'vae':
            self.bottleneck = bn.VAE(**bn_params)
        elif bn_type == 'ae':
            self.bottleneck = bn.AE(**bn_params)
        else:
            raise ValueError(f"unknown bottleneck type: {bn_type}")

        self.decoder = dec.WaveNet(**decoder_params)

        # Does the complete model need the loss function defined as well?
        self.loss = loss.CrossEntropyLoss()
Example #11
def get_criterion(name, **kwargs):
    """
    Returns criterion instance given the name.

    Args:
        name (str): criterion name
        kwargs (dict): keyword arguments passed to criterion constructor

    Returns:
        Corresponding criterion from torch.nn module

    Available criteria:
        BCE, BCEWithLogits, CosineEmbedding, CrossEntropy, HingeEmbedding, KLDiv,
        L1, MSE, MarginRanking, MultiLabelMargin, MultiLabelSoftMargin, MultiMargin,
        NLL, PoissonNLL, SmoothL1, SoftMargin, TripletMargin

    """
    # map names to classes and instantiate only the selected criterion,
    # so kwargs only have to be valid for that one constructor
    return {
        'bce': loss.BCELoss,
        'bcewithlogits': loss.BCEWithLogitsLoss,
        'cosineembedding': loss.CosineEmbeddingLoss,
        'crossentropy': loss.CrossEntropyLoss,
        'hingeembedding': loss.HingeEmbeddingLoss,
        'kldiv': loss.KLDivLoss,
        'l1': loss.L1Loss,
        'mse': loss.MSELoss,
        'marginranking': loss.MarginRankingLoss,
        'multilabelmargin': loss.MultiLabelMarginLoss,
        'multilabelsoftmargin': loss.MultiLabelSoftMarginLoss,
        'multimargin': loss.MultiMarginLoss,
        'nll': loss.NLLLoss,
        'poissonnll': loss.PoissonNLLLoss,
        'smoothl1': loss.SmoothL1Loss,
        'softmargin': loss.SoftMarginLoss,
        'tripletmargin': loss.TripletMarginLoss
    }[name.strip().lower()](**kwargs)
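
Because only the selected class is instantiated, keyword arguments need only match that one constructor; a sketch with illustrative values:

import torch

weights = torch.tensor([0.45, 0.9, 1.125, 4.5])
weighted_ce = get_criterion('crossentropy', weight=weights)
triplet = get_criterion('tripletmargin', margin=0.5)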
Example #12
    def __init__(self, args):
        input_size = (args.input_size, args.input_size)

        self.run_name = args.run_name
        self.input_size = input_size
        self.lr = args.learning_rate
        self.output_type = args.output_type
        self.test_epoch = args.test_epoch
        self.freeze = args.freeze
        self.no_augment = args.no_augment
        self.augment_type = args.augment_type
        self.weighted_loss = args.weighted_loss
        self.save_all = args.save_all
        self.probability = args.probability
        self.classification_loss_type = args.classification_loss_type
        self.disable_cuda = args.disable_cuda
        network_architecture_class = InceptionSiameseNetwork
        network_architecture_transforms = get_pretrained_iv3_transforms
        if args.model_type == "shared":
            network_architecture_class = InceptionSiameseShared
            network_architecture_transforms = get_pretrained_iv3_transforms
        elif args.model_type == "light":
            network_architecture_class = LightSiameseNetwork
            network_architecture_transforms = get_light_siamese_transforms
        elif args.model_type == "after":
            network_architecture_class = InceptionCNNNetwork
            network_architecture_transforms = get_pretrained_iv3_transforms
        elif args.model_type == "vgg":
            network_architecture_class = VggSiameseNetwork
            network_architecture_transforms = get_pretrained_vgg_transforms
        elif args.model_type == "attentive":
            network_architecture_class = AttentiveNetwork
            network_architecture_transforms = get_pretrained_attentive_transforms

        # define the loss measure
        if self.output_type == "regression":
            self.criterion = nnloss.MSELoss()
            self.model = network_architecture_class()
        elif self.output_type == "classification":
            self.number_classes = args.number_classes
            self.model = network_architecture_class(
                output_type=self.output_type,
                n_classes=self.number_classes,
                freeze=self.freeze,
            )

            if self.classification_loss_type == 'cross-entropy':
                self.criterion = nnloss.CrossEntropyLoss()
            else:
                self.criterion = log_f1_micro_loss

        self.transforms = {}

        if torch.cuda.device_count() > 1:
            logger.info("Using {} GPUs".format(torch.cuda.device_count()))
            self.model = torch.nn.DataParallel(self.model)

        for s in ("train", "validation", "test", "inference"):
            self.transforms[s] = network_architecture_transforms(
                s, self.no_augment, self.augment_type
            )

        logger.debug("Num params: {}".format(len([_ for _ in self.model.parameters()])))

        self.optimizer = Adam(self.model.parameters(), lr=self.lr)
        # reduces the learning rate when loss plateaus, i.e. doesn't improve
        self.lr_scheduler = ReduceLROnPlateau(
            self.optimizer, factor=0.1, patience=10, min_lr=1e-5, verbose=True
        )

        if not self.disable_cuda:
            self.scaler = torch.cuda.amp.GradScaler()
        else:
            self.scaler = None

        # creates tracking file for tensorboard
        self.writer = SummaryWriter(args.checkpoint_path)

        self.device = args.device
        self.model_path = args.model_path
        self.trained_model_path = args.trained_model_path
        self.prediction_path = args.prediction_path
        self.model_type = args.model_type
        self.is_statistical_model = args.statistical_model
        self.is_neural_model = args.neural_model
        self.log_step = args.log_step
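
The GradScaler created above is typically driven from the training step; a generic mixed-precision sketch following the usual torch.cuda.amp pattern, not this repo's actual loop:

import torch

def train_step(model, images, labels, criterion, optimizer, scaler, device):
    optimizer.zero_grad()
    if scaler is not None:
        # forward pass in mixed precision; scale the loss before backward
        with torch.cuda.amp.autocast():
            loss_value = criterion(model(images.to(device)), labels.to(device))
        scaler.scale(loss_value).backward()
        scaler.step(optimizer)
        scaler.update()
    else:
        loss_value = criterion(model(images.to(device)), labels.to(device))
        loss_value.backward()
        optimizer.step()
    return loss_value.item()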
Example #13
        'lenet': lenet.LeNet(3, n_classes, size),
        'resnet': resnet.ResNet(depth=18, n_classes=n_classes),
    }[args.actor]
elif args.dataset in ['covtype']:
    n_features = train_x.size(1)
    actor = {
        'linear': nn.Linear(n_features, n_classes),
        'mlp': mlp.MLP([n_features, 60, 60, 80, n_classes], th.tanh)
    }[args.actor]

if args.w > 0:
    assert n_classes == 2
w = (th.tensor([1 - args.w, args.w]) if args.w
     else th.full((n_classes,), 1.0 / n_classes))
cross_entropy = loss.CrossEntropyLoss(w)
if cuda:
    cross_entropy.cuda()
    actor.cuda()

optimizer = optim.Adam(actor.parameters(), lr=args.lr, amsgrad=True)

report(actor, -1)


for i in range(args.n_iterations):
    x, y = next(loader)
    ce = cross_entropy(actor(x), y)
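
With the weight tensor fixed, args.w = 0.3 yields class weights [0.7, 0.3] for the binary case; a quick standalone check (th aliases torch in this script):

import torch as th
from torch.nn.modules import loss

w = th.tensor([1 - 0.3, 0.3])
cross_entropy = loss.CrossEntropyLoss(w)  # weight is the first positional argument
print(cross_entropy(th.randn(4, 2), th.randint(0, 2, (4,))))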
Example #14
def evaluate_model(results_root: Union[str, Path],
                   data_part: str = "val",
                   device: str = "cuda") -> Tuple[float, float, np.ndarray]:
    """
    The main training function
    :param results_root: path to results folder
    :param data_part: {train, val, test} partition to evaluate model on
    :param device: {cuda, cpu}
    :return: None
    """
    results_root = Path(results_root)
    logging.basicConfig(filename=results_root / f"{data_part}.log",
                        level=logging.NOTSET)
    # Set up logging and show config
    log = logging.getLogger(__name__)
    if not log.handlers:
        log.addHandler(logging.StreamHandler())

    cfg_path = results_root / ".hydra/config.yaml"
    log.info(f"Read config from {cfg_path}")

    cfg = OmegaConf.load(str(cfg_path))
    log.debug(f"Config:\n{cfg.pretty()}")

    # Specify results paths from config
    checkpoint_path = results_root / cfg.results.checkpoints.root
    checkpoint_path /= f"{cfg.results.checkpoints.name}.pth"

    # Data
    # Specify data paths from config
    data_root = Path(cfg.data.root)
    test_path = data_root / data_part

    # Check if dataset is available
    log.info(f"Looking for dataset in {str(data_root)}")
    if not data_root.exists():
        log.error("Folder not found. Terminating. "
                  "See README.md for data downloading details.")
        raise FileNotFoundError

    base_transform = to_tensor_normalize()
    test_dataset = TinyImagenetDataset(test_path, cfg, base_transform)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.train.batch_size,
        shuffle=False,
        collate_fn=DatasetItem.collate,
        num_workers=cfg.train.num_workers,
    )

    log.info(f"Created test dataset ({len(test_dataset)}) "
             f"and loader ({len(test_loader)}): "
             f"batch size {cfg.train.batch_size}, "
             f"num workers {cfg.train.num_workers}")

    loss_function = loss.CrossEntropyLoss()
    model = torch_model(
        cfg.model.arch,
        cfg.data.classes,
        cfg.model.pretrained,
        log,
        module_name=cfg.model.module,
    )
    try:
        model.load_state_dict(torch.load(checkpoint_path, map_location="cpu"))
    except RuntimeError as e:
        log.error("Failed loading state dict")
        raise e
    except FileNotFoundError as e:
        log.error("Checkpoint not found")
        raise e
    log.info(f"Loaded model from {checkpoint_path}")
    device = (torch.device("cuda") if device == "cuda"
              and torch.cuda.is_available() else torch.device("cpu"))
    test_loss, test_acc, test_outputs = test(model, device, test_loader,
                                             loss_function, 0, log)
    log.info(f"Loss {test_loss}, acc {test_acc}")
    log.info(f"Outputs:\n{test_outputs.shape}\n{test_outputs[:5, :5]}")
    logging.shutdown()
    return test_loss, test_acc, test_outputs
Example #15
from torch import optim
from torch.nn.modules import loss

from models.sequential import Sequential
from training.data_helper import get_transforms_for_sequential, load_data
from training.training_helper import train, test

NUM_EPOCHS = 5
NUM_CLASSES = 10
BATCH_SIZE = 256
LEARNING_RATE = 0.01

train_loader, validation_loader, test_loader, total_training_batches = load_data(
    'out/train', 'out/validation', 'out/test', BATCH_SIZE,
    get_transforms_for_sequential())
model = Sequential(NUM_CLASSES)
criterion = loss.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)

train(train_loader, validation_loader, test_loader, NUM_EPOCHS,
      total_training_batches, model, criterion, optimizer)
test(test_loader)
Example #16
    def __init__(self, settings):
        self.settings = settings
        self.transforms = settings['APPLY_TRANSFORMATIONS'] 
        self.train_loader = None
        self.val_loader = None
        self.test_loader = None
        self.bestValidationLoss = None
        self.batch_size = settings['batch_size']
        self.learning_rate = settings['learning_rate']
        self.title = settings['title']

        # settings['MODEL'] is either a model class to instantiate (n_class is
        # assumed to come from the enclosing scope) or, when settings['loaded']
        # is truthy, an already-built model instance
        loaded = settings.get('loaded', False)
        if loaded:
            self.model = settings['MODEL']
        else:
            self.model = settings['MODEL'](n_class=n_class)
            self.model_name = settings['MODEL'].__name__

        self.start_time = datetime.now()

        self.criterion = loss.CrossEntropyLoss()
        

        # account for VGG needing different init_weights; models that were
        # loaded pre-trained keep their existing weights
        if not loaded:
            init = init_weights_transfer if settings['title'] == 'VGG' else init_weights
            self.model.apply(init)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)

        use_gpu = torch.cuda.is_available()
        if use_gpu:
            self.model = self.model.cuda()
            self.computing_device = torch.device('cuda')
        else:
            self.computing_device = torch.device('cpu')

        print("Loaded Model" if loaded else self.model_name)

        self.load_data()