Code Example #1
File: train_loop.py Project: joaomonteirof/asv_base
    def __init__(self,
                 model,
                 optimizer,
                 train_loader,
                 valid_loader,
                 label_smoothing,
                 warmup_its,
                 max_gnorm=10.0,
                 verbose=-1,
                 device=0,
                 cp_name=None,
                 save_cp=False,
                 checkpoint_path=None,
                 checkpoint_epoch=None,
                 cuda=True,
                 logger=None):
        if checkpoint_path is None:
            # Save to current directory
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)

        self.save_epoch_fmt = os.path.join(
            self.checkpoint_path, cp_name if cp_name else 'checkpoint_{}ep.pt')
        self.cuda_mode = cuda
        self.model = model
        self.optimizer = optimizer
        self.max_gnorm = max_gnorm
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.total_iters = 0
        self.cur_epoch = 0
        self.harvester_all = AllTripletSelector()
        self.verbose = verbose
        self.save_cp = save_cp
        self.device = device
        self.history = {'train_loss': [], 'train_loss_batch': []}
        self.logger = logger

        # Iterations per epoch: ceiling division of dataset size by batch size
        its_per_epoch = (len(train_loader.dataset) + train_loader.batch_size
                         - 1) // train_loader.batch_size

        if label_smoothing > 0.0:
            self.ce_criterion = LabelSmoothingLoss(
                label_smoothing, lbl_set_size=train_loader.dataset.n_speakers)
        else:
            self.ce_criterion = torch.nn.CrossEntropyLoss()

        if self.valid_loader is not None:
            self.history['valid_loss'] = []

        if checkpoint_epoch is not None:
            self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
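Every example on this page passes a smoothing factor and the class count straight into LabelSmoothingLoss. For orientation, here is a minimal sketch of what such a criterion typically looks like, written against the LabelSmoothingLoss(label_smoothing, lbl_set_size=...) signature used above; the actual implementation in these projects may differ in details such as reduction or masking.

import torch
import torch.nn.functional as F

class LabelSmoothingLoss(torch.nn.Module):
    """Cross-entropy against a uniformly smoothed target distribution (sketch)."""

    def __init__(self, label_smoothing, lbl_set_size):
        super().__init__()
        self.smoothing = label_smoothing
        self.n_classes = lbl_set_size

    def forward(self, logits, target):
        # Target distribution: 1 - eps on the true class, eps/(C-1) on the rest
        log_probs = F.log_softmax(logits, dim=-1)
        true_dist = torch.full_like(log_probs,
                                    self.smoothing / (self.n_classes - 1))
        true_dist.scatter_(1, target.unsqueeze(1), 1.0 - self.smoothing)
        return torch.mean(torch.sum(-true_dist * log_probs, dim=-1))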
Code Example #2
    def __init__(self,
                 model,
                 optimizer,
                 train_loader,
                 valid_loader,
                 label_smoothing,
                 checkpoint_path=None,
                 checkpoint_epoch=None,
                 swap=False,
                 softmax='none',
                 mining=False,
                 cuda=True):
        if checkpoint_path is None:
            # Save to current directory
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)

        self.save_epoch_fmt = os.path.join(self.checkpoint_path,
                                           'checkpoint_{}ep.pt')
        self.cuda_mode = cuda
        self.softmax = softmax != 'none'
        self.mining = mining
        self.model = model
        self.swap = swap
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.total_iters = 0
        self.cur_epoch = 0
        self.harvester = TripletHarvester()

        if self.softmax:
            if label_smoothing > 0.0:
                self.ce_criterion = LabelSmoothingLoss(
                    label_smoothing,
                    lbl_set_size=train_loader.dataset.n_speakers)
            else:
                self.ce_criterion = torch.nn.CrossEntropyLoss()

        if self.valid_loader is not None:
            self.history = {
                'train_loss': [],
                'train_loss_batch': [],
                'valid_loss': []
            }
        else:
            self.history = {'train_loss': [], 'train_loss_batch': []}

        if self.softmax:
            self.history['softmax_batch'] = []
            self.history['softmax'] = []

        if checkpoint_epoch is not None:
            self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
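One detail worth flagging in Example #2: self.softmax = softmax != 'none' compares the argument against a string, so softmax is meant to be a mode name rather than a boolean. A boolean default of False would make the comparison truthy and silently enable the cross-entropy branch:

print(False != 'none')   # True  -> softmax branch enabled by accident
print('none' != 'none')  # False -> softmax branch correctly disabled

This is why 'none' is the safe default for the parameter.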
Code Example #3
File: train.py Project: Vaden4d/logo-classifier
train_transform, valid_transform = get_transforms(img_size=args.image_size)
train_labeled_loader, valid_labeled_loader = get_loaders(
    train_labeled,
    test_labeled,
    train_transform,
    valid_transform,
    target_column=args.target_column,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=True
)

if args.ssl:
    dataset_unlabeled = ImageDataset(unlabeled, train_transform, target_column=None)

loss = LabelSmoothingLoss(num_classes=2, smoothing=0.2, weight=None)

if args.ssl:
    print("Semi-supervised learning model is configured...")
    model = EfficientNetSSL(loss=loss, num_classes=2, weight=weight)
else:
    model = EfficientNetModel(loss=loss, num_classes=2, weight=weight)

model_checkpoint = ModelCheckpoint(monitor="val_acc_f1",
                                   verbose=True,
                                   dirpath="models/",
                                   mode="max",
                                   filename="{epoch}_{val_acc_f1:.4f}")

if args.ssl:
    # SSL approach changes only train dataloader and model class
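Example #3 constructs the loss with a different signature (num_classes, smoothing, and an optional per-class weight) than the speaker-verification trainers above, and hands it to a model wrapper instead of calling it directly. Either way, such a criterion is invoked like any other PyTorch loss; a minimal sketch with dummy tensors (the two-class setup mirrors this example, and LabelSmoothingLoss refers to the class imported in train.py):

import torch

loss_fn = LabelSmoothingLoss(num_classes=2, smoothing=0.2, weight=None)
logits = torch.randn(8, 2, requires_grad=True)  # dummy batch of 8, 2 classes
targets = torch.randint(0, 2, (8,))             # dummy integer class labels
loss = loss_fn(logits, targets)                 # scalar tensor
loss.backward()                                 # backprop as usual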
Code Example #4
    def __init__(self,
                 model,
                 optimizer,
                 train_loader,
                 valid_loader,
                 margin,
                 lambda_,
                 label_smoothing,
                 warmup_its,
                 max_gnorm=10.0,
                 verbose=-1,
                 device=0,
                 cp_name=None,
                 save_cp=False,
                 checkpoint_path=None,
                 checkpoint_epoch=None,
                 swap=False,
                 lr_red_epoch=100,
                 lr_factor=0.1,
                 softmax='none',
                 pretrain=False,
                 mining=False,
                 cuda=True,
                 logger=None):
        if checkpoint_path is None:
            # Save to current directory
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)

        self.save_epoch_fmt = os.path.join(
            self.checkpoint_path, cp_name if cp_name else 'checkpoint_{}ep.pt')
        self.cuda_mode = cuda
        self.softmax = softmax != 'none'
        self.pretrain = pretrain
        self.mining = mining
        self.model = model
        self.swap = swap
        self.lambda_ = lambda_
        self.optimizer = optimizer
        self.lr_red_epoch = lr_red_epoch
        self.lr_factor = lr_factor
        self.max_gnorm = max_gnorm
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.total_iters = 0
        self.cur_epoch = 0
        self.margin = margin
        self.harvester_mine = HardestNegativeTripletSelector(
            margin=self.margin, cpu=not self.cuda_mode)
        self.harvester_all = AllTripletSelector()
        self.verbose = verbose
        self.save_cp = save_cp
        self.device = device
        self.history = {'train_loss': [], 'train_loss_batch': []}
        self.logger = logger
        self.base_lr = self.optimizer.optimizer.param_groups[0]['lr']

        # Iterations per epoch: ceiling division of dataset size by batch size
        its_per_epoch = (len(train_loader.dataset) + train_loader.batch_size
                         - 1) // train_loader.batch_size

        if self.softmax:
            if label_smoothing > 0.0:
                self.ce_criterion = LabelSmoothingLoss(
                    label_smoothing,
                    lbl_set_size=train_loader.dataset.n_speakers)
            else:
                self.ce_criterion = torch.nn.CrossEntropyLoss()

        if self.valid_loader is not None:
            self.history['valid_loss_emb'] = []
            self.history['valid_loss_out'] = []
            self.history['valid_loss_fus'] = []

        if self.softmax:
            self.history['softmax_batch'] = []
            self.history['softmax'] = []

        if checkpoint_epoch is not None:
            self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
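Two details in Example #4 are easy to miss. base_lr is read through self.optimizer.optimizer, which implies the optimizer passed in is wrapped in another object (plausibly a warm-up wrapper, given the warmup_its argument; the snippet itself does not confirm this). And the its_per_epoch expression here and in Example #1 is plain ceiling division, equivalent to math.ceil:

import math

dataset_len, batch_size = 1003, 100  # dummy sizes
assert (dataset_len + batch_size - 1) // batch_size == \
       math.ceil(dataset_len / batch_size) == 11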
Code Example #5
    def __init__(self,
                 model,
                 optimizer,
                 optimizer_pase,
                 train_loader,
                 valid_loader,
                 label_smoothing,
                 patience,
                 verbose=-1,
                 device=0,
                 cp_name=None,
                 save_cp=False,
                 checkpoint_path=None,
                 checkpoint_epoch=None,
                 cuda=True):
        if checkpoint_path is None:
            # Save to current directory
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)

        self.save_epoch_fmt = os.path.join(
            self.checkpoint_path, cp_name if cp_name else 'checkpoint_{}ep.pt')
        self.cuda_mode = cuda
        self.model = model
        self.optimizer = optimizer
        self.optimizer_pase = optimizer_pase
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.total_iters = 0
        self.cur_epoch = 0
        self.harvester_all = AllTripletSelector()
        self.verbose = verbose
        self.save_cp = save_cp
        self.device = device
        self.history = {'train_loss': [], 'train_loss_batch': []}

        if label_smoothing > 0.0:
            self.ce_criterion = LabelSmoothingLoss(
                label_smoothing, lbl_set_size=train_loader.dataset.n_speakers)
        else:
            self.ce_criterion = torch.nn.CrossEntropyLoss()

        if self.valid_loader is not None:
            self.history['valid_loss'] = []
            self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                factor=0.5,
                patience=patience,
                verbose=self.verbose > 0,
                threshold=1e-4,
                min_lr=1e-7)
        else:
            self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
                self.optimizer, milestones=[20, 100, 200, 300, 400], gamma=0.1)

        if checkpoint_epoch is not None:
            self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
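Example #5 chooses its scheduler based on whether a validation loader exists, and the two kinds are stepped differently in the epoch loop (not shown here): ReduceLROnPlateau must be stepped with the monitored metric, while MultiStepLR is stepped unconditionally. A self-contained illustration of the two conventions (dummy model; attaching both schedulers to one optimizer is for demonstration only):

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

plateau = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, factor=0.5, patience=2)
multistep = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[20, 100], gamma=0.1)

val_loss = 0.42          # dummy monitored metric
plateau.step(val_loss)   # plateau schedulers consume the metric...
multistep.step()         # ...milestone schedulers are stepped once per epoch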