Example No. 1: unittest fixtures that build YOLO classification and detection variants (PyTorch)
 @classmethod
 def setUpClass(cls):
     # Build the backbone and both YOLO head variants once for the whole test class.
     cls.backbone = Backbone().to(device)
     cls.yolo_c_head = Yolo(head=ClassificationHead()).to(device)
     cls.yolo_d_head = Yolo(
         DetectionHead(input_size=cfg.detection_head_input_size,
                       C=cfg.C,
                       B=cfg.B)).to(device)
     # Dummy 1x3x448x448 input batch shared by the tests.
     cls.x = t.ones(1, 3, 448, 448).to(device)
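A hedged sketch of how this fixture might be exercised: the test below assumes the setUpClass above lives in a unittest.TestCase subclass, and the forward-pass check is hypothetical rather than taken from the source.

import unittest
import torch as t

class YoloForwardTest(unittest.TestCase):
    # Assume the setUpClass shown above populated cls.backbone,
    # cls.yolo_c_head, cls.yolo_d_head, and the dummy input cls.x.

    def test_detection_forward(self):
        # Hypothetical smoke test: a forward pass on the dummy batch succeeds.
        with t.no_grad():
            out = self.yolo_d_head(self.x)
        self.assertIsNotNone(out)

if __name__ == "__main__":
    unittest.main()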
Example No. 2: the Trainer.train loop with checkpointing and TensorBoard logging (TensorFlow)
    def train(self, train_dataset, valid_dataset=None, transfer='scratch'):
        """ train function
        :param train_dataset: train dataset built by tf.data
        :param valid_dataset: valid dataset build by td.data, optional
        :param transfer: pretrain
        :return:
        """
        steps_per_epoch = train_dataset.len / self.params['batch_size']
        self.total_steps = int(self.params['n_epochs'] * steps_per_epoch)
        self.params[
            'warmup_steps'] = self.params['warmup_epochs'] * steps_per_epoch

        with self.strategy.scope():
            self.lr_scheduler = LrScheduler(self.total_steps,
                                            self.params,
                                            scheduler_method='cosine')
            # => tf.keras.Model
            self.model = self.model(self.params['img_size'])

            ckpt = tf.train.Checkpoint(model=self.model,
                                       optimizer=self.optimizer)
            ckpt_manager = tf.train.CheckpointManager(
                ckpt, self.params['checkpoint_dir'], max_to_keep=5)
            if transfer == 'darknet':
                print("Load weights from ")
                model_pretrain = Yolo(self.params['yaml_dir'])()
                model_pretrain.load_weights()
                self.model.get_layer().set_weights()
            elif transfer == 'resume':
                print("Load weights from latest checkpoint")
                ckpt.restore(ckpt_manager.latest_checkpoint)
            elif transfer == 'scratch':
                print("Train from scratch")
                self.model.summary()  # summary() prints itself; no outer print needed

        train_dataset = self.strategy.experimental_distribute_dataset(
            train_dataset)

        for epoch in range(1, self.params['n_epochs'] + 1):
            for step, (image, target) in enumerate(train_dataset):
                loss = self.dist_train_step(image, target)
                print('=> Epoch {}, Step {}, Loss {:.5f}'.format(
                    epoch, self.global_step.numpy(), loss.numpy()))
                with self.log_writer.as_default():
                    tf.summary.scalar('loss', loss, step=self.global_step)
                    tf.summary.scalar('lr',
                                      self.optimizer.lr,
                                      step=self.global_step)
                self.log_writer.flush()

            if epoch % 3 == 0:
                ckpt_save_path = ckpt_manager.save()
                print('Saving checkpoint for epoch {} at {}'.format(
                    epoch, ckpt_save_path))

        self.export_model()
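LrScheduler is project code, but the warmup_steps bookkeeping and the 'cosine' method name point at a standard recipe: linear warmup followed by cosine decay. The stand-in below is a minimal self-contained sketch of that recipe; the class name, init_lr, and final_lr are assumptions, not the project's API.

import math

class CosineWarmupLR:
    """Hypothetical stand-in for LrScheduler: linear warmup, then cosine decay."""

    def __init__(self, total_steps, warmup_steps, init_lr=1e-3, final_lr=1e-6):
        self.total_steps = total_steps
        self.warmup_steps = warmup_steps
        self.init_lr = init_lr
        self.final_lr = final_lr
        self.step_count = 0

    def step(self):
        # Return the learning rate for the next optimizer step.
        self.step_count += 1
        if self.step_count <= self.warmup_steps:
            # Linear ramp from 0 up to init_lr across the warmup phase.
            return self.init_lr * self.step_count / max(1, self.warmup_steps)
        # Cosine decay from init_lr down to final_lr over the remaining steps.
        progress = ((self.step_count - self.warmup_steps)
                    / max(1, self.total_steps - self.warmup_steps))
        return self.final_lr + 0.5 * (self.init_lr - self.final_lr) * (
            1 + math.cos(math.pi * min(progress, 1.0)))

Each optimizer step would then call step() and assign the result, which is exactly what train_step does with self.lr_scheduler.step() followed by self.optimizer.lr.assign(lr).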
Example No. 3: loading pretrained weights from a model directory (PyTorch)
 def load_model(self):
     # Expect exactly one .pth weight file under weights/<model_name>/.
     model_path = os.path.join("weights", self.args.model_name)
     if not os.path.exists(model_path):
         raise FileNotFoundError("Model directory does not exist")
     weight_path = glob.glob(os.path.join(model_path, "*.pth"))
     if len(weight_path) == 0:
         raise FileNotFoundError("Model weights not found")
     if len(weight_path) > 1:
         raise RuntimeError(
             "Multiple weight files found; keep only one in the model directory")
     weight_path = weight_path[0]
     pretrained_dict = torch.load(weight_path, map_location=self.device)
     self.model = Yolo(n_classes=self.args.number_of_classes)
     self.model = self.model.to(self.device)
     self.model.load_state_dict(pretrained_dict)
Example No. 4: build_model under a tf.distribute strategy (TensorFlow)
    def build_model(self):
        if self.params['multi_gpus']:
            self.strategy = tf.distribute.MirroredStrategy(devices=None)
        else:
            self.strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

        with self.strategy.scope():
            self.model = Yolo(yaml_dir=self.params['yaml_dir'])
            # The detection head is the last entry in the YAML-defined module list.
            self.anchors = self.model.module_list[-1].anchors
            self.stride = self.model.module_list[-1].stride
            self.num_classes = self.model.module_list[-1].num_classes

            self.loss_fn = YoloLoss(self.model.module_list[-1].anchors,
                                    ignore_iou_threshold=0.3,
                                    num_classes=self.num_classes,
                                    label_smoothing=self.params['label_smoothing'],
                                    img_size=self.params['img_size'])
            self.optimizer = Optimizer('adam')()   
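Optimizer('adam')() is also project code; the construct-then-call pattern suggests a small factory object. A hedged sketch of what such a factory could look like (an assumption, not the repository's implementation):

import tensorflow as tf

class Optimizer:
    """Hypothetical factory mapping a name to a tf.keras optimizer."""

    def __init__(self, name):
        self.name = name

    def __call__(self, learning_rate=1e-3):
        # Only the two optimizers this sketch needs; extend as required.
        if self.name == 'adam':
            return tf.keras.optimizers.Adam(learning_rate)
        if self.name == 'sgd':
            return tf.keras.optimizers.SGD(learning_rate, momentum=0.9)
        raise ValueError("unknown optimizer: {}".format(self.name))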
Example No. 5: the complete Trainer class (TensorFlow)
class Trainer(object):
    """ Trainer class that uses the dataset and model to train
    # Usage
    data_loader = tf.data.Dataset()
    trainer = Trainer(params)
    trainer.train(data_loader)
    """
    def __init__(self, params):
        """ Constructor
        :param params: dict with directory and training parameters
        """
        self.params = params
        if os.path.exists(self.params['log_dir']):
            shutil.rmtree(self.params['log_dir'])
        self.log_writer = tf.summary.create_file_writer(self.params['log_dir'])
        self.global_step = tf.Variable(0, trainable=False, dtype=tf.int64)
        self.build_model()

    def build_model(self):
        """ Build the model,
        define the training strategy and model, loss, optimizer
        :return:
        """
        if self.params['multi_gpus']:
            self.strategy = tf.distribute.MirroredStrategy(devices=None)
        else:
            self.strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

        with self.strategy.scope():
            self.model = Yolo(yaml_dir=self.params['yaml_dir'])
            # The detection head is the last entry in the YAML-defined module list.
            self.anchors = self.model.module_list[-1].anchors
            self.stride = self.model.module_list[-1].stride
            self.num_classes = self.model.module_list[-1].num_classes

            self.loss_fn = YoloLoss(
                self.model.module_list[-1].anchors,
                ignore_iou_threshold=0.3,
                num_classes=self.num_classes,
                label_smoothing=self.params['label_smoothing'],
                img_size=self.params['img_size'])
            self.optimizer = Optimizer('adam')()

    def train(self, train_dataset, valid_dataset=None, transfer='scratch'):
        """ train function
        :param train_dataset: train dataset built by tf.data
        :param valid_dataset: valid dataset build by td.data, optional
        :param transfer: pretrain
        :return:
        """
        steps_per_epoch = train_dataset.len / self.params['batch_size']
        self.total_steps = int(self.params['n_epochs'] * steps_per_epoch)
        self.params[
            'warmup_steps'] = self.params['warmup_epochs'] * steps_per_epoch

        with self.strategy.scope():
            self.lr_scheduler = LrScheduler(self.total_steps, self.params)
            # => tf.keras.Model
            self.model = self.model(self.params['img_size'])

            ckpt = tf.train.Checkpoint(model=self.model,
                                       optimizer=self.optimizer)
            ckpt_manager = tf.train.CheckpointManager(
                ckpt, self.params['checkpoint_dir'], max_to_keep=5)
            if transfer == 'darknet':
                print("Load weights from ")
                model_pretrain = Yolo(self.params['yaml_dir'])()
                model_pretrain.load_weights()
                self.model.get_layer().set_weights()
            elif transfer == 'resume':
                print("Load weights from latest checkpoint")
                ckpt.restore(ckpt_manager.latest_checkpoint)
            elif transfer == 'scratch':
                print("Train from scratch")
                self.model.summary()  # summary() prints itself; no outer print needed

        train_dataset = self.strategy.experimental_distribute_dataset(
            train_dataset)

        for epoch in range(1, self.params['n_epochs'] + 1):
            for step, (image, target) in enumerate(train_dataset):
                loss = self.dist_train_step(image, target)
                print('=> Epoch {}, Step {}, Loss {:.5f}'.format(
                    epoch, self.global_step.numpy(), loss.numpy()))
                with self.log_writer.as_default():
                    tf.summary.scalar('loss', loss, step=self.global_step)
                    tf.summary.scalar('lr',
                                      self.optimizer.lr,
                                      step=self.global_step)
                self.log_writer.flush()

            if epoch % 3 == 0:
                ckpt_save_path = ckpt_manager.save()
                print('Saving checkpoint for epoch {} at {}'.format(
                    epoch, ckpt_save_path))

        self.export_model()

    # @tf.function
    def train_step(self, image, target):
        with tf.GradientTape() as tape:
            logit = self.model(image, training=True)
            iou_loss, conf_loss, prob_loss = self.loss_fn(target, logit)
            total_loss = iou_loss + conf_loss + prob_loss

        gradients = tape.gradient(total_loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(
            zip(gradients, self.model.trainable_variables))

        lr = self.lr_scheduler.step()
        self.optimizer.lr.assign(lr)
        self.global_step.assign_add(1)
        return total_loss

    @tf.function
    def dist_train_step(self, image, target):
        with self.strategy.scope():
            loss = self.strategy.run(self.train_step, args=(image, target))
            total_loss_mean = self.strategy.reduce(tf.distribute.ReduceOp.MEAN,
                                                   loss,
                                                   axis=None)
            return total_loss_mean

    def validate(self, valid_dataset):
        valid_loss = []
        for step, (image, target) in enumerate(valid_dataset):
            step_valid_loss = self.valid_step(image, target)
            valid_loss.append(step_valid_loss)
        return np.mean(valid_loss)

    def valid_step(self, image, label):
        logit = self.model(image, training=False)
        iou_loss, conf_loss, prob_loss = self.loss_fn(label, logit)
        return iou_loss + conf_loss + prob_loss

    def export_model(self):
        tf.saved_model.save(self.model, self.params['saved_model_dir'])
        print("pb model saved in {}".format(self.params['saved_model_dir']))
Example No. 6: a minimal training script on the shape dataset (PyTorch)
import os

import numpy as np
import torch as t
from torch.optim import SGD
from torch.utils.data import DataLoader

# Project-local modules (cfg, Yolo, DetectionHead, YoloAugmentation,
# ShapeDataset, detection_collate, YoloLoss) are assumed importable
# from the surrounding repository.

if __name__ == "__main__":
    device = t.device("cpu")
    root = os.path.join(cfg.data_dir, 'shape')

    transform = YoloAugmentation(size=448)
    dataset = ShapeDataset(root=root, transform=transform)
    loader = DataLoader(dataset,
                        batch_size=cfg.batch_size,
                        shuffle=True,
                        collate_fn=detection_collate)

    model = Yolo(DetectionHead(C=cfg.C, B=cfg.B)).to(device)
    if os.path.exists('model.pkl'):
        print('Loading model...')
        model.load_state_dict(t.load('model.pkl'))

    optimizer = SGD(model.parameters(), lr=0.01, momentum=0.8)
    criterion = YoloLoss(cfg.S, cfg.B, cfg.C, cfg.feat_stride)

    n_epochs = 1

    model.train()

    for e in range(n_epochs):
        train_loss = 0
        i = 0
        for imgs, bboxes, labels in loader:
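            # The source snippet is truncated here. What follows is an assumed,
            # typical body for such a training step, not the original code; the
            # YoloLoss call signature in particular is a guess.
            imgs = imgs.to(device)
            pred = model(imgs)
            loss = criterion(pred, bboxes, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            i += 1
        # Report the epoch average and persist weights to the file the script
        # reloads on startup.
        print('epoch {}: avg loss {:.4f}'.format(e, train_loss / max(i, 1)))
        t.save(model.state_dict(), 'model.pkl')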
Example No. 7: a Detect class for batched inference and post-processing (PyTorch)
class Detect:
    def __init__(self, args):
        self.args = args
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.class_names = load_class_names(
            os.path.join(self.args.data_folder, "class.names"))
        self.model = None

    def load_model(self):
        # Expect exactly one .pth weight file under weights/<model_name>/.
        model_path = os.path.join("weights", self.args.model_name)
        if not os.path.exists(model_path):
            raise FileNotFoundError("Model directory does not exist")
        weight_path = glob.glob(os.path.join(model_path, "*.pth"))
        if len(weight_path) == 0:
            raise FileNotFoundError("Model weights not found")
        if len(weight_path) > 1:
            raise RuntimeError(
                "Multiple weight files found; keep only one in the model directory")
        weight_path = weight_path[0]
        pretrained_dict = torch.load(weight_path, map_location=self.device)
        self.model = Yolo(n_classes=self.args.number_of_classes)
        self.model = self.model.to(self.device)
        self.model.load_state_dict(pretrained_dict)

    def save_results(self, imgs, boxes):
        save_folder = os.path.join("outputs", self.args.model_name)
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        for i, (img_path, box) in enumerate(zip(imgs, boxes)):
            plot_boxes(img_path, box, self.class_names, self.args.img_size,
                       save_folder)

    def detect(self):
        dataset = ImageDataset(os.path.join(self.args.data_folder, "detect"),
                               img_size=self.args.img_size,
                               ext=self.args.ext)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=self.args.batch_size, shuffle=False)

        self.load_model()
        self.model.eval()

        start = time.time()
        for img_path, img in dataloader:
            boxes, imgs = [], []

            img = img.to(self.device)

            with torch.no_grad():
                temp = time.time()
                output, _ = self.model(
                    img)  # batch=1 -> [1, n, n], batch=3 -> [3, n, n]
                temp1 = time.time()
                box = post_process(output, self.args.conf_thres,
                                   self.args.nms_thres)
                temp2 = time.time()
                boxes.extend(box)
                print('-----------------------------------')
                num = 0
                for b in box:
                    if b is None:
                        break
                    num += len(b)
                print("{}-> {} objects found".format(img_path, num))
                print("Inference time : ", round(temp1 - temp, 5))
                print("Post-processing time : ", round(temp2 - temp1, 5))
                print('-----------------------------------')

            imgs.extend(img_path)
            self.save_results(imgs, boxes)

        end = time.time()

        print('-----------------------------------')
        print("Total detecting time : ", round(end - start, 5))
        print('-----------------------------------')
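A hedged sketch of driving this class end to end; each argparse flag mirrors a self.args attribute read above and is an assumption, not the project's documented CLI.

import argparse

parser = argparse.ArgumentParser()
# Flags mirror the self.args attributes used by Detect (assumed defaults).
parser.add_argument("--model_name", default="yolo")
parser.add_argument("--data_folder", default="data")
parser.add_argument("--number_of_classes", type=int, default=80)
parser.add_argument("--img_size", type=int, default=448)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--ext", default=".jpg")
parser.add_argument("--conf_thres", type=float, default=0.5)
parser.add_argument("--nms_thres", type=float, default=0.4)
args = parser.parse_args()

detector = Detect(args)
detector.detect()  # loads weights/<model_name>/*.pth, runs detection, saves plots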