Example #1
0
 def train_epoch(self, model: BaseModel, data_loader: DataLoader) -> float:
     """Run one full pass over *data_loader* and return the mean batch loss.

     :param model: model exposing ``train_batch(x, y) -> float``.
     :param data_loader: yields ``(inputs, targets)`` pairs.
     :return: arithmetic mean of the per-batch losses.
     """
     batch_losses = [
         model.train_batch(try_cuda(batch_x), try_cuda(batch_y))
         for batch_x, batch_y in data_loader
     ]
     return mean(batch_losses)
Example #2
0
    def train_batch(self, train_x: torch.Tensor, teacher: torch.Tensor) -> float:
        """Run one optimization step on a single batch and return the loss.

        :param train_x: (batch size, channel, height, width)
        :param teacher: (batch size, )
        :return: scalar loss value for this batch.
        """
        self._model.train()
        inputs = try_cuda(train_x).float()
        targets = try_cuda(teacher).long()

        # Forward pass.
        predictions = self._model(inputs)
        batch_loss = self._criterion(predictions, targets)

        # Backward pass and parameter update.
        self._optimizer.zero_grad()
        batch_loss.backward()
        self._optimizer.step()
        return batch_loss.item()
Example #3
0
 def predict(self, inputs):
     """Return class probabilities for a batch, with gradients disabled.

     :param inputs: (batch size, channel, height, width)
     :return: (batch size, class) numpy array of softmax probabilities
     """
     self._model.eval()
     with torch.no_grad():
         batch = try_cuda(inputs).float()
         # torch.softmax(x, dim=1) is equivalent to nn.Softmax(dim=1)(x).
         probabilities = torch.softmax(self._model(batch), dim=1)
     return probabilities.cpu().numpy()
def get_model(dataset_setting: DataSetSetting,
              model_type: str,
              lr: float,
              efficientnet_scale: int = 0):
    """Build a classification model for the given model type.

    :param dataset_setting: dataset configuration; only ``n_classes`` is read here.
    :param model_type: one of the ``MODEL_*`` identifiers.
    :param lr: learning rate forwarded to the model constructor.
    :param efficientnet_scale: EfficientNet compound-scaling index
        (selects ``efficientnet-b{scale}``); ignored by the other model types.
    :raises ValueError: if ``model_type`` is not a known identifier.
    """
    # TODO: register additional model types here.
    if MODEL_EFFICIENT_NET == model_type:
        return EfficientNet(num_classes=dataset_setting.n_classes,
                            lr=lr,
                            network=f"efficientnet-b{efficientnet_scale}")
    elif MODEL_ATTENTION_BRANCH_NETWORK == model_type:
        return try_cuda(
            AttentionBranchNetwork(n_classes=dataset_setting.n_classes, lr=lr))
    elif MODEL_MOBILENET == model_type:
        return MobileNetV3(num_classes=dataset_setting.n_classes,
                           lr=lr,
                           pretrained=False)
    # BUG FIX: the original `assert f"Invalid model type..."` asserted a
    # non-empty string, which is always truthy, so unknown types silently
    # returned None. Raise an explicit error instead.
    raise ValueError(
        f"Invalid model type {model_type!r}. Valid models: {MODEL_TYPES}")
# Number of independent models to train and bundle into the ensemble.
parser.add_argument('--n_models', type=int, default=3, help="Count of model.")

if __name__ == "__main__":
    args = parser.parse_args()

    # Fetch dataset.
    dataset_setting = DataSetSetting.from_dataset_type(settings, args.dataset)
    train_dataloader, test_dataloader, train_dataset, test_dataset = get_dataloader(
        dataset_setting, args.dataset_root, args.batch_size)
    callbacks = []
    model_list, learning_tables = [], []
    # Build one model (moved to GPU when available) and one learning table
    # per ensemble member; all members share the same dataloader, callback
    # list and epoch count.
    for _ in range(args.n_models):
        model_list.append(
            try_cuda(
                get_model(dataset_setting,
                          model_type=args.model,
                          lr=args.lr,
                          efficientnet_scale=args.efficientnet_scale)))
        learning_tables.append(
            LearningTable(data_loader=train_dataloader,
                          callbacks=callbacks,
                          epochs=args.epoch))

    assemble_model: AssembleModel = AssembleModel(models=model_list)
    # Fetch model and load weight.
    if args.load_weight_dir:
        assemble_model.load_weight(args.load_weight_dir)
    # Fall back to a per-model-type directory when no save path was given.
    save_weight_dir = args.save_weight_dir or f"./{args.model}"

    # Training.
    # NOTE(review): the excerpt ends here — the call that actually runs
    # training (and uses learning_tables/save_weight_dir) is presumably
    # below this chunk.
    trainer = XTrainer(assemble_model)
Example #6
0
parser.add_argument('--efficientdet_scale',
                    type=int,
                    default=0,
                    help="Scale of EfficientDet.")
parser.add_argument('--label_names_path',
                    type=str,
                    default="voc_label_names.txt",
                    help="File path of label names (Detection only)")

if __name__ == "__main__":
    args = parser.parse_args()

    # Fetch model and load weight.
    model = try_cuda(
        get_model(args.n_classes,
                  args.image_size,
                  model_type=args.model,
                  submodel_type=args.submodel,
                  model_scale=args.efficientdet_scale))
    model.load_weight(args.load_weight_path)

    # Dispatch to the realtime runner matching the model family.
    if isinstance(model, SegmentationModel):
        RealtimeSegmentation(
            model=model,
            img_size_for_model=(args.image_size,
                                args.image_size)).realtime_predict()
    elif isinstance(model, DetectionModel):
        # Detection requires a class-name list to annotate predictions.
        assert args.label_names_path is not None
        with open(args.label_names_path, "r") as file:
            label_names = []
            for label in file:
                # NOTE(review): `label` keeps its trailing newline here —
                # likely should be `label.rstrip("\n")`; confirm how
                # label_names is consumed below this excerpt.
                label_names.append(label)