Example #1
import os
import datetime
import tensorflow as tf
import common        # project-local constants (NUM_POINT, NUM_CLASSES)
import dataset as D  # project-local data pipeline; module name assumed
import model as M    # project-local model definitions


def train():

    if os.path.exists("deepannotate.h5"):
        model = tf.keras.models.load_model("deepannotate.h5")
    else:
        model = M.get_model_cls(common.NUM_POINT, common.NUM_CLASSES[2], True)
        model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(
                          from_logits=True),
                      metrics=[
                          tf.keras.metrics.sparse_categorical_accuracy,
                          tf.keras.metrics.SparseTopKCategoricalAccuracy(k=2)
                      ])
    model.summary()

    input_data = D.get_cls_train_dataset()
    #input_data = input_data.shuffle(buffer_size=32*382)
    input_data = input_data.batch(32)

    eval_data = D.get_cls_eval_dataset()
    eval_data = eval_data.batch(32)

    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    def lr_scheduler(epoch):
        if epoch < 20:
            return 0.001
        elif epoch < 40:
            return 0.0005
        elif epoch < 80:
            return 0.0001
        elif epoch < 120:
            return 0.00005
        else:
            return 0.00001
        #return max(0.001 * (0.7 ** (epoch / 10)), 0.00001)

    lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_scheduler,
                                                           verbose=1)

    class SaveCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            # Checkpoint every 5 epochs so an interrupted run can resume.
            if epoch % 5 == 0:
                self.model.save("deepannotate.h5",
                                include_optimizer=True,
                                overwrite=True)
                self.model.save_weights("da_weights.h5")
                print("model saved!")

    model.fit(input_data,
              validation_data=eval_data,
              epochs=250,
              callbacks=[tensorboard_callback, lr_callback,
                         SaveCallback()])

    model.save("deepannotate.h5", include_optimizer=True, overwrite=True)
    model.save_weights("da_weights.h5")
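
A note on the hand-rolled SaveCallback above: Keras ships a built-in checkpoint callback that covers the same ground. A minimal sketch, assuming the logged metric name follows from the metrics list passed to compile() above; pass it in the callbacks list of model.fit():

checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    "deepannotate.h5",
    monitor="val_sparse_categorical_accuracy",  # name assumed from compile()
    save_best_only=True,   # keep only the best validation checkpoint
    save_freq="epoch",
    verbose=1)
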
Example #2

import numpy as np
import tensorflow as tf
import common
import util
import model as M

util.config_gpu()

RESAMPLE_NUM = 20

weights_path = "../DeepAnnotate/models/da_weights.h5"
model = M.get_model_cls(common.NUM_POINT, common.NUM_CLASSES[2], False)
model.load_weights(weights_path)
model.summary()


def sample_one_obj(points, num):
    # Pad with zeros when the object has fewer than `num` points,
    # otherwise draw `num` points uniformly without replacement.
    if points.shape[0] < num:
        pad = np.zeros((num - points.shape[0], 3), dtype=np.float32)
        return np.concatenate([points, pad], axis=0)
    else:
        idx = np.arange(points.shape[0])
        np.random.shuffle(idx)
        return points[idx[:num]]

def predict(points):
    points = np.array(points).reshape((-1, 3))
    # Resample the object RESAMPLE_NUM times to average out sampling noise.
    input_data = np.stack(
        [sample_one_obj(points, common.NUM_POINT) for _ in range(RESAMPLE_NUM)],
        axis=0)
    pred_val = model.predict(input_data)
    pred_cls = np.argmax(pred_val, axis=-1)
    return pred_cls
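
The snippet ends at the per-resample argmax, so pred_cls holds RESAMPLE_NUM independent votes. One way to reduce them to a single label (a sketch, not part of the original code) is a majority vote:

def majority_vote(pred_cls):
    # pred_cls: (RESAMPLE_NUM,) array of per-resample class indices.
    # np.bincount tallies the votes; argmax picks the most frequent class.
    return int(np.argmax(np.bincount(pred_cls)))

# e.g. majority_vote(predict(points)) -> single class index
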
Example #3
    def __init__(self, config):

        # Torch environment
        # ======================================================
        self.config = config
        self.device = torch.device(
            config['train']['device'] if torch.cuda.is_available() else "cpu")

        # Define model
        # =======================================================
        model_cls = get_model_cls(config['model']['name'])
        self.model = model_cls(**config['model']['kwargs'])
        self.model = self.model.to(self.device)

        # Define dataset
        # =======================================================
        tr_transform = transforms.Compose([
            transforms.Resize(config['dataset']['size']),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.Pad(10),
            transforms.RandomCrop(config['dataset']['size']),
            transforms.ColorJitter(brightness=0.3,
                                   contrast=0.1,
                                   saturation=0.1,
                                   hue=0),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=0.3)
        ])
        te_transform = transforms.Compose([
            transforms.Resize(config['dataset']['size']),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        dataset_cls = get_dataset_cls(config['dataset']['name'])
        train_dataset = dataset_cls(config['dataset']['root'],
                                    transform=tr_transform,
                                    mode='all')
        query_dataset = dataset_cls(config['dataset']['root'],
                                    transform=te_transform,
                                    mode='query')
        gallery_dataset = dataset_cls(config['dataset']['root'],
                                      transform=te_transform,
                                      mode='gallery')

        # Combine extra datasets, collecting labels before concatenation
        # (a concatenated ConcatDataset no longer exposes `.data`)
        train_labels = [sample[1] for sample in train_dataset.data]
        for dataset_name in config['dataset']['extras']:
            dataset_cls = get_dataset_cls(dataset_name)
            dataset = dataset_cls(config['dataset']['root'],
                                  transform=tr_transform,
                                  mode='all')
            train_labels += [sample[1] for sample in dataset.data]
            train_dataset = train_dataset + dataset

        print("Training dataset")
        print(train_dataset)

        # Define train/validation dataloader
        # =======================================================
        common_config = {'num_workers': 4, 'pin_memory': True}
        sampler = BalancedBatchSampler(train_labels,
                                       P=config['dataloader']['P'],
                                       K=config['dataloader']['K'])
        self.train_loader = DataLoader(dataset=train_dataset,
                                       batch_sampler=sampler,
                                       **common_config)
        self.query_loader = DataLoader(
            dataset=query_dataset,
            batch_size=config['dataloader']['batch_size'],
            shuffle=False,
            **common_config)
        self.gallery_loader = DataLoader(
            dataset=gallery_dataset,
            batch_size=config['dataloader']['batch_size'],
            shuffle=False,
            **common_config)

        # Learning objective
        margin = config['loss']['margin']
        selector = RandomNegativeTripletSelector(margin=margin)
        self.triplet_loss = OnlineTripletLoss(margin, selector)
        self.crossentropy_loss = nn.CrossEntropyLoss()

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config['optimizer']['lr'],
                                    weight_decay=5e-4)
        self.scheduler = lr_scheduler.StepLR(
            self.optimizer,
            step_size=config['scheduler']['step_size'],
            gamma=0.1)

        # Tensorboard Writer
        # ======================================================
        dataset_name = "_".join([config['dataset']['name']] +
                                config['dataset']['extras'])
        self.log_dir = osp.join(config['train']['log_dir'],
                                config['train']['exp_name'])
        self.writer = SummaryWriter(log_dir=self.log_dir)

        # Current state
        self.best_mAP = 0
        self.current_epoch = -1

        # Resume training
        # =======================================================
        if config['train']['resume']:
            checkpoint_path = osp.join(self.log_dir, 'best.pth')
            checkpoint = torch.load(checkpoint_path)
            self.model.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            self.best_mAP = checkpoint['current_mAP']
            self.current_epoch = checkpoint['current_epoch']
            print("Resume training at epoch '{}'".format(self.current_epoch))
Example #4
    def __init__(self, config, rank=-1):
        self.rank = rank
        self.config = config

        # Training environment
        if config['train']['mode'] == 'parallel':
            gpu_id = config['train']['gpus'][rank]
            self.device = "cuda:{}".format(gpu_id)
        else:
            self.device = (config['train']['device']
                           if torch.cuda.is_available() else "cpu")

        # Dataset
        train_transform = T.Compose([
            T.RandomResizedCrop(
                (config['dataset']['size'], config['dataset']['size'])),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        valid_transform = T.Compose([
            T.Resize(256),
            T.CenterCrop(
                (config['dataset']['size'], config['dataset']['size'])),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        train_dataset = ImageFolder(config['dataset']['train']['root'],
                                    transform=train_transform)
        valid_dataset = ImageFolder(config['dataset']['valid']['root'],
                                    transform=valid_transform)

        # Dataloader
        if config['train']['mode'] == 'parallel':
            self.sampler = DistributedSampler(train_dataset)
            self.train_loader = DataLoader(
                train_dataset,
                sampler=self.sampler,
                batch_size=config['dataloader']['batch_size'],
                num_workers=config['dataloader']['num_workers'],
                pin_memory=True,
                shuffle=False)
        else:
            self.train_loader = DataLoader(
                train_dataset,
                batch_size=config['dataloader']['batch_size'],
                num_workers=config['dataloader']['num_workers'],
                pin_memory=True,
                shuffle=True)

        self.valid_loader = DataLoader(
            valid_dataset,
            batch_size=config['dataloader']['batch_size'],
            num_workers=config['dataloader']['num_workers'],
            pin_memory=True,
            shuffle=False)

        # Model
        if config['model']['name'] == "resnet18":
            model_cls = resnet18
        else:
            model_cls = get_model_cls(config['model']['name'])
        model = model_cls(**config['model']['kwargs'])
        if config['train']['mode'] == 'parallel':
            model = model.to(self.device)
            self.model = DDP(model, device_ids=[config['train']['gpus'][rank]])
            # checkpoint = torch.load("run/darknet53_dist/best.pth")
            # self.model.load_state_dict(checkpoint['model'])
        else:
            self.model = model.to(self.device)

        # Optimizer
        self.optimizer = optim.SGD(
            self.model.parameters(),
            lr=config['optimizer']['lr'],
            momentum=config['optimizer']['momentum'],
            weight_decay=config['optimizer']['weight_decay'])
        # Scheduler
        self.scheduler = MultiStepLR(
            self.optimizer,
            milestones=config['scheduler']['milestones'],
            gamma=config['scheduler']['gamma'])

        # Loss function
        self.criterion = nn.CrossEntropyLoss().to(self.device)

        # Tensorboard
        self.log_dir = osp.join(config['train']['log_dir'],
                                config['train']['exp_name'])
        if ((self.rank == 0 and config['train']['mode'] == 'parallel')
                or self.rank < 0):
            self.writer = SummaryWriter(logdir=self.log_dir)

        # Dynamic state
        self.current_epoch = -1
        self.current_loss = 10000
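
The 'parallel' branch expects one process per GPU with its rank passed in. Below is a minimal launcher sketch following the standard torch.distributed recipe; the class name Trainer is an assumption, since the snippet does not show it:

import os
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank, config):
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '29500')
    dist.init_process_group('nccl', rank=rank,
                            world_size=len(config['train']['gpus']))
    trainer = Trainer(config, rank=rank)  # hypothetical class name
    # ... run the training loop (not shown in the snippet) ...
    dist.destroy_process_group()

if __name__ == '__main__':
    mp.spawn(worker, args=(config,), nprocs=len(config['train']['gpus']))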