Example #1
import torch.nn as nn

# Note: the Inception block used below is assumed to be defined
# elsewhere in the original module; it is not part of this snippet.


class GoogLeNet(nn.Module):
    def __init__(self, criterion=nn.CrossEntropyLoss()):
        super(GoogLeNet, self).__init__()
        self.criterion = criterion
        self.best_accuracy = 0

        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)
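The snippet above is only the constructor. A forward pass consistent with these layers would chain the stages in order; the sketch below follows the standard GoogLeNet layout implied by the constructor and is not necessarily the original author's exact code:

    def forward(self, x):
        # Stem convolutions, then the two stage-3 Inception blocks
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        # Five stage-4 Inception blocks, followed by pooling
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        # Two stage-5 Inception blocks, global average pool, classifier
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)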
Example #2
def load_model(session, m_type, m_name, logger):
    # load the weights based on best loss
    best_dir = "best_loss"

    # check model dir
    model_path = "models/" + m_name
    path = os.path.join(model_path, best_dir)
    if not os.path.exists(path):
        raise FileNotFoundError(path)

    if m_type == "simple":
        model = Simple(m_name, config, logger)
    elif m_type == "YOLO":
        model = YOLO(m_name, config, logger)
    elif m_type == "GAP":
        model = GAP(m_name, config, logger)
    elif m_type == "NAS":
        model = NASNET(m_name, config, logger)
    elif m_type == "INC":
        model = Inception(m_name, config, logger)
    else:
        raise ValueError(f"Unknown model type: {m_type}")

    # load the best saved weights
    ckpt = tf.train.get_checkpoint_state(path)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        logger.log('Reloading model parameters...')
        model.restore(session, ckpt.model_checkpoint_path)
    else:
        raise ValueError(f"No saved best-loss checkpoint found in {path}")

    return model
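A hypothetical invocation, assuming a TF1-style session and a logger object exposing a log method (the model type and name here are illustrative, not from the source):

import tensorflow as tf

with tf.Session() as sess:
    # "YOLO" selects the branch above; "my_yolo" is a placeholder folder name
    model = load_model(sess, "YOLO", "my_yolo", logger)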
Example #3
def create_model(session, m_type, m_name, logger):
    """
    create or load the last saved model
    :param session: tf.Session
    :param m_type: model type
    :param m_name: model name (equal to the folder name)
    :param logger: logger
    :return: the created or restored model
    """
    if m_type == "simple":
        model = Simple(m_name, config, logger)
    elif m_type == "YOLO":
        model = YOLO(m_name, config, logger)
    elif m_type == 'GAP':
        model = GAP(m_name, config, logger)
    elif m_type == 'NAS':
        model = NASNET(m_name, config, logger)
    elif m_type == 'INC':
        model = Inception(m_name, config, logger)
    else:
        raise ValueError

    ckpt = tf.train.get_checkpoint_state(model.model_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        logger.log('Reloading model parameters...')
        model.restore(session, ckpt.model_checkpoint_path)
    else:
        logger.log('Created new model parameters...')
        session.run(tf.global_variables_initializer())

    return model
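create_model differs from load_model above only in its fallback: when no checkpoint exists, it initializes fresh variables instead of raising. A hypothetical call, with illustrative names again:

with tf.Session() as sess:
    # Falls back to tf.global_variables_initializer() if no checkpoint is found
    model = create_model(sess, "INC", "inception_run1", logger)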
Example #4
nb_aug = 5
dropout = 0.4
clip = 0.01
use_val = True
archs = ["inception"]

models = {
    "vggbn":
    Vgg16BN(size=(270, 480),
            n_classes=nb_classes,
            lr=0.001,
            batch_size=batch_size,
            dropout=dropout),
    "inception":
    Inception(size=(299, 299),
              n_classes=nb_classes,
              lr=0.001,
              batch_size=batch_size),
    "resnet":
    Resnet50(size=(270, 480),
             n_classes=nb_classes,
             lr=0.001,
             batch_size=batch_size,
             dropout=dropout)
}


def train(parent_model, model_str):
    parent_model.build()
    model_fn = saved_model_path + '{val_loss:.2f}-loss_{epoch}epoch_' + model_str
    ckpt = ModelCheckpoint(filepath=model_fn,
                           monitor='val_loss',
                           save_best_only=True)  # save_best_only assumed; the original snippet is truncated here
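Given the archs list and the models dict defined above, a run would presumably dispatch per architecture; a sketch (the remainder of train is not shown in the source):

for arch in archs:
    # Look up the pre-built model wrapper and train it under its name
    train(models[arch], arch)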
Example #5
def main():
    # create the experiments dirs
    create_dirs([Config.summary_dir, "checkpoints", "logs"])

    handlers = [
        logging.FileHandler(
            datetime.now().strftime(f"./logs/%Y-%m-%d_%H-%M-%S-Log.log")),
        logging.StreamHandler()
    ]

    logging.basicConfig(format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        handlers=handlers)

    logging.info("Started Logging")
    logging.info(
        f"Summary Directory Path: {pprint.pformat(Config.summary_dir)}")
    logging.info(f"Checkpoint Path: {pprint.pformat(Config.checkpoint_dir)}")
    logging.info(
        f"Number of cores: {pprint.pformat(Config.num_parallel_cores)}")
    logging.info(
        f"Address of GPU used for training: {pprint.pformat(Config.gpu_address)}"
    )

    logging.info(
        f"Type of DataLoader: {pprint.pformat(Config.dataloader_type)}")

    logging.info(f"Type of Model: {pprint.pformat(Config.model_type)}")
    logging.info(f"Number of Epochs: {pprint.pformat(Config.num_epochs)}")
    logging.info(f"Optimizer Type: {pprint.pformat(Config.optimizer_type)}")
    logging.info(
        f"Optimizer parameters: {pprint.pformat(Config.optim_params)}")
    logging.info(f"Scheduler Type: {pprint.pformat(Config.lr_scheduler_type)}")
    logging.info(
        f"Scheduler parameters: {pprint.pformat(Config.lr_scheduler_params)}")
    logging.info(
        f"Train/Validation split ratio: {pprint.pformat(Config.train_val_split)}"
    )
    logging.info(f"Batch size: {pprint.pformat(Config.batch_size)}")

    logging.info(
        f"Training on Subset of the data: {pprint.pformat(Config.train_on_subset)}"
    )
    logging.info(
        f"Training on Subset of size: {pprint.pformat(Config.subset_size)}")

    logging.info(
        f"Generating Patches: {pprint.pformat(Config.train_on_patches)}")
    logging.info(f"Patch size (square): {pprint.pformat(Config.patch_size)}")
    logging.info(
        f"Number of Patches: {pprint.pformat(Config.n_random_patches)}")

    logging.info(
        f"Mode for network architecture and loss: {pprint.pformat(Config.mode)}"
    )
    logging.info(
        f"Initial weight value for combined loss (single instance weight) : {pprint.pformat(Config.beta)}"
    )
    logging.info(
        f"Exponential Decay Rate for weight for combined loss (single instance weight) : {pprint.pformat(Config.beta_decay)}"
    )
    logging.info(
        f"Pooling type used for Multiple instance pooling layer: {pprint.pformat(Config.pooling)}"
    )

    # create your data generator on the CPU

    with tf.device("/cpu:0"):

        if Config.dataloader_type.lower() == 'datasetfileloader':
            data_loader = DatasetFileLoader.DatasetFileLoader(Config)
        else:
            data_loader = DatasetLoader.DatasetLoader(Config)

    # create tensorflow session on the GPU defined in Config file

    with tf.device(Config.gpu_address):
        bytes_in_use = BytesInUse()
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:

            # create instance of the model you want
            if Config.model_type.lower() == 'lenet':
                model = LeNet.LeNet(data_loader, Config)
            elif Config.model_type.lower() == 'resnet18':
                model = ResNet18_MI.ResNet18_MI(data_loader, Config)
            elif Config.model_type.lower() == 'resnet50':
                model = ResNet50_MI.ResNet50_MI(data_loader, Config)
            elif Config.model_type.lower() == 'alexnet':
                model = AlexNet.AlexNet(data_loader, Config)
            elif Config.model_type.lower() == 'inception':
                model = Inception.Inception(data_loader, Config)
            elif Config.model_type.lower() == 'resnext':
                model = ResNeXt_MI.ResNeXt_MI(data_loader, Config)
            else:
                model = LeNet.LeNet(data_loader, Config)

            # create tensorboard logger
            logger = DefinedSummarizer(
                sess,
                summary_dir=Config.summary_dir,
                scalar_tags=[
                    'train/loss_per_epoch', 'train/acc_per_epoch',
                    'test/loss_per_epoch', 'test/acc_per_epoch',
                    'learning_rate', 'si_weight', 'mi_weight'
                ])

            # create trainer and pass all previous components to it
            trainer = MTrainer(sess, model, Config, logger, data_loader)

            print("Memory Usage:")
            print("---------------------")
            print(sess.run(bytes_in_use))

            # here you train your model
            trainer.train()
            print("Memory Usage:")
            print("---------------------")
            print(sess.run(bytes_in_use))
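The BytesInUse op queried above reports the bytes currently allocated on the device. Its import is not shown; in TF 1.x it would typically come from the contrib memory-stats module (an assumption, since the snippet omits it):

from tensorflow.contrib.memory_stats import BytesInUse  # TF 1.x only; not shown in the snippet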
Example #6
def main():
    # create the experiments dirs
    create_dirs([Config.summary_dir, Config.checkpoint_dir, "logs"])

    handlers = [
        logging.FileHandler(
            datetime.now().strftime(f"./logs/%Y-%m-%d_%H-%M-%S-Log.log")),
        logging.StreamHandler()
    ]

    logging.basicConfig(format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        handlers=handlers)

    logging.info("Started Logging")
    logging.info(
        f"Number of cores: {pprint.pformat(Config.num_parallel_cores)}")
    logging.info(
        f"Address of GPU used for training: {pprint.pformat(Config.gpu_address)}"
    )

    logging.info(
        f"Type of DataLoader: {pprint.pformat(Config.dataloader_type)}")

    logging.info(f"Type of Model: {pprint.pformat(Config.model_type)}")
    logging.info(f"Number of Epochs: {pprint.pformat(Config.num_epochs)}")
    logging.info(f"Optimizer Type: {pprint.pformat(Config.optimizer_type)}")
    logging.info(
        f"Optimizer parameters: {pprint.pformat(Config.optim_params)}")
    logging.info(
        f"Train/Validation split ratio: {pprint.pformat(Config.train_val_split)}"
    )
    logging.info(f"Batch size: {pprint.pformat(Config.batch_size)}")

    logging.info(
        f"Training on Subset of the data: {pprint.pformat(Config.train_on_subset)}"
    )
    logging.info(
        f"Training on Subset of size: {pprint.pformat(Config.subset_size)}")

    logging.info(
        f"Generating Patches: {pprint.pformat(Config.train_on_patches)}")
    logging.info(f"Patch size (square): {pprint.pformat(Config.patch_size)}")

    # create your data generator

    with tf.device("/cpu:0"):

        if Config.dataloader_type.lower() == 'onlinedatasetloader':
            data_loader = OnlineDatasetLoader.OnlineDatasetLoader(Config)
        elif Config.dataloader_type.lower() == 'datasetfileloader':
            data_loader = DatasetFileLoader.DatasetFileLoader(Config)
        else:
            data_loader = DatasetLoader.DatasetLoader(Config)

    # create tensorflow session

    with tf.Session() as sess:

        # create instance of the model you want
        if Config.model_type.lower() == 'lenet':
            model = LeNet.LeNet(data_loader, Config)
        elif Config.model_type.lower() == 'resnet50':
            model = ResNet50.ResNet50(data_loader, Config)
        elif Config.model_type.lower() == 'alexnet':
            model = AlexNet.AlexNet(data_loader, Config)
        elif Config.model_type.lower() == 'inception':
            model = Inception.Inception(data_loader, Config)
        else:
            model = LeNet.LeNet(data_loader, Config)

        # create tensorboard logger
        logger = DefinedSummarizer(sess,
                                   summary_dir=Config.summary_dir,
                                   scalar_tags=[
                                       'train/loss_per_epoch',
                                       'train/acc_per_epoch',
                                       'test/loss_per_epoch',
                                       'test/acc_per_epoch'
                                   ])

        # create trainer and pass all previous components to it
        trainer = MTrainer(sess, model, Config, logger, data_loader)

        # here you train your model
        trainer.train()
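Neither main shown above includes an entry point; the usual guard would be (assumed, not present in the source):

if __name__ == '__main__':
    main()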