Example #1
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    dataset = ContrastiveLearningDataset(args.data)

    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, drop_last=True)

    model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)

    optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
                                                           last_epoch=-1)

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, args=args)
        simclr.train(train_loader)
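
Every example on this page constructs a ResNetSimCLR encoder whose definition is not shown. As a rough guide, here is a minimal sketch of such a module, assuming a torchvision ResNet whose classification layer is replaced by a two-layer projection head (the class name ResNetSimCLRSketch and the layer sizes are assumptions, not the code these examples import):

import torch.nn as nn
import torchvision.models as models


class ResNetSimCLRSketch(nn.Module):
    """Backbone plus projection head, as commonly used for SimCLR pre-training."""

    def __init__(self, base_model="resnet18", out_dim=128):
        super().__init__()
        backbones = {"resnet18": models.resnet18, "resnet50": models.resnet50}
        self.backbone = backbones[base_model](num_classes=out_dim)
        dim_mlp = self.backbone.fc.in_features
        # replace the final fc layer with a 2-layer projection MLP ending in out_dim units
        self.backbone.fc = nn.Sequential(
            nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.fc)

    def forward(self, x):
        return self.backbone(x)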
Example #2
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    dataset = ContrastiveLearningDataset(args.data)

    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    model = ResNetSimCLR(base_model=args.arch,
                         out_dim=args.out_dim).to(args.device)

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading resumed checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=torch.device('cpu'))
            args.start_epoch = checkpoint['epoch']
            args.start_iter = checkpoint['iteration']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("=> loaded resumed checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("[Warning] no checkpoint found at '{}'".format(args.resume))
    else:
        args.start_iter = 0
        args.start_epoch = 0

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        simclr = SimCLR(model=model,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        args=args)
        simclr.train(train_loader)
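
The resume branch in Example #2 reads 'epoch', 'iteration', 'state_dict', 'optimizer' and 'scheduler' entries from the checkpoint. For reference, a sketch of a matching save helper (the function name save_checkpoint and its call site are assumptions):

import torch


def save_checkpoint(path, epoch, iteration, model, optimizer, scheduler):
    # keys mirror exactly what the resume branch above reads back
    torch.save({
        'epoch': epoch,
        'iteration': iteration,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }, path)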
Example #3
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1
    set_random_seed(args.seed)

    train_loader, valid_loader = get_dataloaders(args)

    if args.mode == 'simclr':
        model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
        trainer_class = SimCLRTrainer
    elif args.mode == 'supervised':
        model = ResNetSimCLR(base_model=args.arch, out_dim=len(train_loader.dataset.classes))
        trainer_class = SupervisedTrainer
    else:
        raise InvalidTrainingMode()

    if args.optimizer_mode == 'simclr':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader),
                                                               eta_min=0, last_epoch=-1)

    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                    momentum=0.9, weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        trainer = trainer_class(model=model, optimizer=optimizer, scheduler=scheduler, args=args)
        trainer.train(train_loader, valid_loader)
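
Example #3 relies on a set_random_seed helper that is not part of the snippet. A minimal sketch of what such a helper typically does (an assumption, not the original implementation):

import random

import numpy as np
import torch


def set_random_seed(seed):
    # seed Python, NumPy and PyTorch (CPU and all visible GPUs)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)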
Example #4
def run(config):
    model = ResNetSimCLR('resnet50', config.out_dim)
    model.load_state_dict(
        torch.load(config.model_file, map_location=config.device))
    model = model.to(config.device)
    clf = nn.Linear(2048, 196)
    full = FineTuner(model, clf)
    optim = torch.optim.Adam(list(model.parameters()) + list(clf.parameters()))
    objective = nn.CrossEntropyLoss()

    t = T.Compose([
        T.Resize(512),
        T.CenterCrop(512),
        T.ToTensor(),
        T.Lambda(lambda x: x.repeat(3, 1, 1) if x.shape[0] == 1 else x)
    ])
    train_data_dir = os.path.join(config.data_root, 'cars_train/')
    train_annos = os.path.join(config.data_root, 'devkit/cars_train_annos.mat')
    train_dataset = StanfordCarsMini(train_annos, train_data_dir, t)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True)

    valid_data_dir = os.path.join(config.data_root, 'cars_test/')
    valid_annos = os.path.join(config.data_root,
                               'devkit/cars_test_annos_withlabels.mat')
    valid_dataset = CarsDataset(valid_annos, valid_data_dir, t)
    valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size)

    solver = CESolver(full,
                      train_loader,
                      valid_loader,
                      config.save_root,
                      name=config.name,
                      device=config.device)
    solver.train(config.num_epochs)
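
Example #4 combines the pre-trained encoder with a fresh nn.Linear(2048, 196) classifier through a FineTuner wrapper. A plausible sketch of such a wrapper, assuming the encoder returns a flat 2048-dimensional feature vector per image (the class name FineTunerSketch is hypothetical):

import torch.nn as nn


class FineTunerSketch(nn.Module):
    """Encoder followed by a linear classification head for supervised fine-tuning."""

    def __init__(self, encoder, classifier):
        super().__init__()
        self.encoder = encoder
        self.classifier = classifier

    def forward(self, x):
        feats = self.encoder(x)          # assumed to be [batch, 2048] for ResNet-50
        return self.classifier(feats)    # [batch, 196] class logits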
Example #5
    def train(self):

        train_loader, valid_loader = self.dataset.get_data_loaders()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)

        optimizer = torch.optim.Adam(model.parameters(),
                                     3e-4,
                                     weight_decay=eval(
                                         self.config['weight_decay']))

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=self.config['epochs'], eta_min=0, last_epoch=-1)

        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(self.writer.log_dir,
                                                'checkpoints')

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf

        for epoch_counter in range(self.config['epochs']):
            for (xis, xjs), _ in train_loader:
                optimizer.zero_grad()

                xis = xis.to(self.device)
                xjs = xjs.to(self.device)

                loss = self._step(model, xis, xjs, n_iter)

                if n_iter % self.config['log_every_n_steps'] == 0:
                    self.writer.add_scalar('train_loss',
                                           loss,
                                           global_step=n_iter)

                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                optimizer.step()
                n_iter += 1

            # validate the model if requested
            if epoch_counter % self.config['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(model, valid_loader)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(
                        model.state_dict(),
                        os.path.join(model_checkpoints_folder, 'model.pth'))

                self.writer.add_scalar('validation_loss',
                                       valid_loss,
                                       global_step=valid_n_iter)
                valid_n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 10:
                scheduler.step()
            self.writer.add_scalar('cosine_lr_decay',
                                   scheduler.get_lr()[0],
                                   global_step=n_iter)
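
The _step calls used throughout these trainers ultimately compute a contrastive loss between the projections of the two augmented views. A hedged sketch of an NT-Xent style loss of that kind (a stand-alone illustration, not the trainer's private _step method):

import torch
import torch.nn.functional as F


def nt_xent_loss(zis, zjs, temperature=0.5):
    """NT-Xent loss for a batch of paired views; zis and zjs are [N, D] projections."""
    n = zis.size(0)
    z = F.normalize(torch.cat([zis, zjs], dim=0), dim=1)   # [2N, D], unit-norm rows
    sim = torch.mm(z, z.t()) / temperature                 # [2N, 2N] scaled cosine similarities
    sim.fill_diagonal_(float('-inf'))                       # exclude self-similarity
    # the positive of sample i is its other view: i + n in the first half, i - n in the second
    targets = torch.cat([torch.arange(n, 2 * n), torch.arange(0, n)]).to(z.device)
    return F.cross_entropy(sim, targets)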
Example #6
    def train(self):

        train_loader, valid_loader = self.dataset.get_data_loaders()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)

        if self.augmentor_type == "cnn":
            if self.config["normalization_type"] == "original":
                augmentor = LpAugmentor(
                    clip=self.config["augmentor_clip_output"])
                augmentor.to(self.device)
            elif self.config["normalization_type"] == "spectral":
                augmentor = LpAugmentorSpecNorm(
                    clip=self.config["augmentor_clip_output"])
                augmentor.to(self.device)
            else:
                raise ValueError("Unregonized normalization type: {}".format(
                    self.config["normalization_type"]))
        elif self.augmentor_type == "style_transfer":
            augmentor = LpAugmentorStyleTransfer(
                clip=self.config["augmentor_clip_output"])
            augmentor.to(self.device)
        elif self.augmentor_type == "transformer":
            augmentor = LpAugmentorTransformer(
                clip=self.config["augmentor_clip_output"])
            augmentor.to(self.device)
        else:
            raise ValueError("Unrecognized augmentor type: {}".format(
                self.augmentor_type))

        augmentor_optimizer = torch.optim.Adam(augmentor.parameters(), 3e-4)
        augmentor_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            augmentor_optimizer,
            T_max=len(train_loader),
            eta_min=0,
            last_epoch=-1)

        optimizer = torch.optim.Adam(
            list(model.parameters()),
            3e-4,
            weight_decay=eval(self.config["weight_decay"]),
        )
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

        if apex_support and self.config["fp16_precision"]:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level="O2",
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(self.writer.log_dir,
                                                "checkpoints")

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf

        for epoch_counter in range(self.config["epochs"]):
            print("====== Epoch {} =======".format(epoch_counter))
            for (xis, xjs), _ in train_loader:
                optimizer.zero_grad()

                xis = xis.to(self.device)
                xjs = xjs.to(self.device)

                loss = self._adv_step(model, augmentor, xis, xjs, n_iter)

                if n_iter % self.config["log_every_n_steps"] == 0:
                    self.writer.add_scalar("train_loss",
                                           loss,
                                           global_step=n_iter)

                if apex_support and self.config["fp16_precision"]:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                # for p in augmentor.parameters():
                #     # print(p.name)
                #     p.grad *= -1.0
                optimizer.step()

                # Update augmentor
                augmentor_optimizer.zero_grad()
                loss = self._adv_step(model, augmentor, xis, xjs, n_iter)
                if self.augmentor_loss_type == "hinge":
                    loss = torch.clamp(loss, 0.0, 5.4)
                loss *= -1.0
                loss.backward()
                augmentor_optimizer.step()

                n_iter += 1

            # validate the model if requested
            if epoch_counter % self.config["eval_every_n_epochs"] == 0:
                valid_loss = self._validate(model, augmentor, valid_loader)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(
                        model.state_dict(),
                        os.path.join(model_checkpoints_folder, "model.pth"),
                    )
                print("validation loss: ", valid_loss)
                self.writer.add_scalar("validation_loss",
                                       valid_loss,
                                       global_step=valid_n_iter)
                valid_n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 10:
                scheduler.step()
                augmentor_scheduler.step()

            self.writer.add_scalar("cosine_lr_decay",
                                   scheduler.get_lr()[0],
                                   global_step=n_iter)
Example #7
def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu

    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print(args.gpu)
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))

    model = ResNetSimCLR(base_model=args.arch,
                         out_dim=args.out_dim).to(args.gpu)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.num_workers = int(
                (args.num_workers + ngpus_per_node - 1) / ngpus_per_node)
            model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        #raise NotImplementedError("Only DistributedDataParallel is supported.")
    #else:
    # AllGather implementation (batch shuffle, queue update, etc.) in
    # this code only supports DistributedDataParallel.
    #raise NotImplementedError("Only DistributedDataParallel is supported.")

    # Data loader
    train_loader, train_sampler = data_loader(args.dataset,
                                              args.data_path,
                                              args.batch_size,
                                              args.num_workers,
                                              download=args.download,
                                              distributed=args.distributed,
                                              supervised=False)

    #optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=args.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           T_max=args.epochs,
                                                           eta_min=0,
                                                           last_epoch=-1)

    criterion = NTXentLoss(args.gpu, args.batch_size, args.temperature,
                           True).cuda(args.gpu)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if apex_support and args.fp16_precision:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level='O2',
                                          keep_batchnorm_fp32=True)

    cudnn.benchmark = True

    train(model, train_loader, train_sampler, criterion, optimizer, scheduler,
          args, ngpus_per_node)
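
main_worker(gpu, ngpus_per_node, args) in Example #7 is the per-process entry point of a distributed run. A sketch of how such a worker is usually launched with torch.multiprocessing.spawn (the launch wrapper itself is an assumption; the argument names match those used above):

import torch
import torch.multiprocessing as mp


def launch(args):
    # one process per visible GPU; args carries the distributed settings main_worker reads
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # total world size is the number of nodes times the GPUs per node
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)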
Example #8
                          num_workers=config['num_workers'],
                          drop_last=True,
                          shuffle=True)

# model = Encoder(out_dim=out_dim)
model = ResNetSimCLR(base_model=config["base_convnet"], out_dim=out_dim)

train_gpu = torch.cuda.is_available()
print("Is gpu available:", train_gpu)

# moves the model parameters to gpu
if train_gpu:
    model.cuda()

criterion = torch.nn.CrossEntropyLoss(reduction='sum')
optimizer = optim.Adam(model.parameters(), 3e-4)

train_writer = SummaryWriter()

sim_func_dim1, sim_func_dim2 = get_similarity_function(use_cosine_similarity)

# Mask to remove positive examples from the batch of negative samples
negative_mask = get_negative_mask(batch_size)

n_iter = 0
for e in range(config['epochs']):
    for step, ((xis, xjs), _) in enumerate(train_loader):

        optimizer.zero_grad()

        if train_gpu:
Example #9
    def train(self):

        train_loader, valid_loader = self.dataset.get_data_loaders()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)

        criterion = nn.CrossEntropyLoss()  # loss function

        optimizer = torch.optim.Adam(model.parameters(),
                                     3e-4,
                                     weight_decay=eval(
                                         self.config['weight_decay']))

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=self.config['epochs'], eta_min=0, last_epoch=-1)

        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(
            '/home/zhangchunhui/MedicalAI/USCL/checkpoints_multi_aug',
            'checkpoint_' + str(self.Checkpoint_Num))

        # save config file
        _save_config_file(model_checkpoints_folder)

        start_time = time.time()
        end_time = time.time()
        valid_n_iter = 0
        best_valid_loss = np.inf

        for epoch in range(self.config['epochs']):
            for i, data in enumerate(train_loader, 1):
                # forward
                # mixupimg1, label1, mixupimg2, label2, original img1, original img2
                xis, labelis, xjs, labeljs, imgis, imgjs = data  # N samples of left branch, N samples of right branch

                xis = xis.to(self.device)
                xjs = xjs.to(self.device)

                ####### 1-Semi-supervised
                hi, xi, outputis = model(xis)
                hj, xj, outputjs = model(xjs)
                # X = 9999 marks "no label"
                labelindexi = FindNotX(labelis.tolist(), 9999)
                labelindexj = FindNotX(labeljs.tolist(), 9999)

                lossi = criterion(outputis[labelindexi],
                                  labelis.to(self.device)[labelindexi])
                lossj = criterion(outputjs[labelindexj],
                                  labeljs.to(self.device)[labelindexj])

                # lumbda1=lumbda2   # small value is better
                lumbda1, lumbda2 = self.lumbda1, self.lumbda2  # small value is better
                loss = self._step(model, xis,
                                  xjs) + lumbda1 * lossi + lumbda2 * lossj
                ########################################################################################################

                ####### 2-Self-supervised
                # loss = self._step(model, xis, xjs)
                ########################################################################################################

                # backward
                optimizer.zero_grad()
                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                # update weights
                optimizer.step()

                if i % self.config['log_every_n_steps'] == 0:
                    # self.writer.add_scalar('train_loss', loss, global_step=i)
                    start_time, end_time = end_time, time.time()
                    print(
                        "\nTraining:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Time: {:.2f}s"
                        .format(epoch + 1, self.config['epochs'], i,
                                len(train_loader), loss,
                                end_time - start_time))

            # validate the model if requested
            if epoch % self.config['eval_every_n_epochs'] == 0:
                start_time = time.time()
                valid_loss = self._validate(model, valid_loader)
                end_time = time.time()
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(
                        model.state_dict(),
                        os.path.join(model_checkpoints_folder,
                                     'best_model.pth'))

                print(
                    "Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Time: {:.2f}s"
                    .format(epoch + 1, self.config['epochs'],
                            len(valid_loader), len(valid_loader), valid_loss,
                            end_time - start_time))
                # self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1

            print('Learning rate this epoch:',
                  scheduler.get_last_lr()[0])  # python >=3.7
            # print('Learning rate this epoch:', scheduler.base_lrs[0])   # python 3.6

            # warmup for the first 10 epochs
            if epoch >= 10:
                scheduler.step()
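
Example #9 filters out unlabeled samples with a FindNotX helper, where the label value 9999 marks "no label". A minimal sketch of what that helper presumably does (an assumption, not the original code):

def FindNotX(labels, X):
    # indices of the entries whose label is not the placeholder value X
    return [i for i, label in enumerate(labels) if label != X]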
Example #10
    def train(self):

        train_loader, valid_loader = self.dataset.get_data_loaders()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)  # just a ResNet backbone

        model = self._load_pre_trained_weights(model)  # checkpoints (either convert the TF checkpoint to PyTorch, or train from scratch since there is plenty of data)

        optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=eval(self.config['weight_decay']))

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
                                                               last_epoch=-1)  # learning rate scheduler (kept as is)

        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf


        for epoch_counter in range(self.config['epochs']):  # start training
            for (x, y) in train_loader:  # each batch holds the two augmented views
                optimizer.zero_grad()

                x = x.to(self.device)  # in SimCLR the loss is computed between two augmented views
                y = y.to(self.device)

                loss = self._step(model, x, y)

                if n_iter % self.config['log_every_n_steps'] == 0:
                    self.writer.add_scalar('train_loss', loss, global_step=n_iter)

                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                optimizer.step()
                n_iter += 1

            # validate the model if requested
            if epoch_counter % self.config['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(model, valid_loader)
                print('Epoch:',epoch_counter,' ---',' validation_loss:',valid_loss)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(model.state_dict(), os.path.join(model_checkpoints_folder, 'model.pth'))

                self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 10:
                scheduler.step()
            self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=n_iter)
Example #11
    def train(self):
        #Data
        train_loader, valid_loader = self.dataset.get_data_loaders()

        #Model
        model = ResNetSimCLR(**self.config["model"])
        if self.device == 'cuda':
            model = nn.DataParallel(model, device_ids=list(range(self.config['gpu']['gpunum'])))
        #model = model.to(self.device)
        model = model.cuda()
        print(model)
        model = self._load_pre_trained_weights(model)
        
        each_epoch_steps = len(train_loader)
        total_steps = each_epoch_steps * self.config['train']['epochs'] 
        warmup_steps = each_epoch_steps * self.config['train']['warmup_epochs']
        scaled_lr = eval(self.config['train']['lr']) * self.batch_size / 256.

        optimizer = torch.optim.Adam(
                     model.parameters(), 
                     scaled_lr, 
                     weight_decay=eval(self.config['train']['weight_decay']))
       
        '''
        optimizer = LARS(params=model.parameters(),
                     lr=eval(self.config['train']['lr']),
                     momentum=self.config['train']['momentum'],
                     weight_decay=eval(self.config['train']['weight_decay']),
                     eta=0.001,
                     max_epoch=self.config['train']['epochs'])
        '''

        # scheduler during warmup stage
        lambda1 = lambda epoch: epoch * 1.0 / int(warmup_steps)
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)

        if apex_support and self.config['train']['fp16_precision']:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf
        lr = eval(self.config['train']['lr']) 

        end = time.time()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        
        for epoch_counter in range(self.config['train']['epochs']):
            model.train()
            for i, ((xis, xjs), _) in enumerate(train_loader):
                data_time.update(time.time() - end)
                optimizer.zero_grad()

                xis = xis.cuda()
                xjs = xjs.cuda()

                loss = self._step(model, xis, xjs, n_iter)

                #print("Loss: ",loss.data.cpu())
                losses.update(loss.item(), 2 * xis.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                print('Epoch: [{epoch}][{step}/{each_epoch_steps}] '
                      'Loss {loss.val:.4f} Avg Loss {loss.avg:.4f} '
                      'DataTime {datatime.val:.4f} BatchTime {batchtime.val:.4f} '
                      'LR {lr}'.format(epoch=epoch_counter, step=i,
                                       each_epoch_steps=each_epoch_steps,
                                       loss=losses, datatime=data_time,
                                       batchtime=batch_time, lr=lr))

                if n_iter % self.config['train']['log_every_n_steps'] == 0:
                    self.writer.add_scalar('train_loss', loss, global_step=n_iter)

                if apex_support and self.config['train']['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                optimizer.step()
                n_iter += 1

                #adjust lr
                if n_iter == warmup_steps:
                    # scheduler after warmup stage
                    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps-warmup_steps, eta_min=0, last_epoch=-1)
                scheduler.step()
                lr = scheduler.get_lr()[0]
                self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=n_iter)
                sys.stdout.flush()

            # validate the model if requested
            if epoch_counter % self.config['train']['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(model, valid_loader)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(model.state_dict(), os.path.join(model_checkpoints_folder, 'model.pth'))

                self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1
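
Example #11 implements warmup by stepping a LambdaLR and then swapping in a CosineAnnealingLR once n_iter reaches warmup_steps. On PyTorch 1.10 or newer the same per-iteration schedule can be written more compactly with SequentialLR; a hedged alternative sketch (not the original code):

import torch


def build_warmup_cosine(optimizer, warmup_steps, total_steps):
    # linear warmup followed by cosine decay, stepped once per training iteration
    warmup = torch.optim.lr_scheduler.LinearLR(
        optimizer, start_factor=1e-3, end_factor=1.0, total_iters=warmup_steps)
    cosine = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=total_steps - warmup_steps, eta_min=0)
    return torch.optim.lr_scheduler.SequentialLR(
        optimizer, schedulers=[warmup, cosine], milestones=[warmup_steps])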
Example #12
    def train(self, callback=lambda m, e, l: None):

        train_loader, valid_loader = self.dataset.get_data_loaders()

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)

        optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=eval(self.config['weight_decay']))

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
                                                               last_epoch=-1)

        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)
        else:
            print("No apex_support or config not fp16 precision")

        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf

        eval_freq = self.config['eval_every_n_epochs']
        num_epochs = self.config["epochs"]
        
        train_len = len(train_loader)
        valid_len = len(valid_loader)

        loop = tqdm(total=num_epochs * train_len, position=0)

        for epoch_counter in range(num_epochs):
            for it, ((xis, xjs), _) in enumerate(train_loader):
                optimizer.zero_grad()

                xis = xis.to(self.device)
                xjs = xjs.to(self.device)

                loss = self._step(model, xis, xjs, n_iter)

                if n_iter % self.config['log_every_n_steps'] == 0:
                    self.writer.add_scalar('train_loss', loss, global_step=n_iter)

                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                optimizer.step()
                n_iter += 1

                loop.update(1)
                loop.set_description(f"E {epoch_counter}/{num_epochs}, it: {it}/{train_len}, Loss: {loss.item()}")

            # validate the model if requested
            if epoch_counter % self.config['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(model, valid_loader)
                callback(model, epoch_counter, valid_loss)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(model.state_dict(), os.path.join(model_checkpoints_folder,
                                                            f'{self.dataset.name}-model-{epoch_counter}.pth'))

                self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 10:
                scheduler.step()
            self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=n_iter)
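
The callback parameter in Example #12 defaults to a no-op and receives the model, the epoch counter and the validation loss after every evaluation. A small usage sketch (the logging body and the trainer variable name are illustrative only):

def log_validation(model, epoch_counter, valid_loss):
    # called once per evaluated epoch with the current model and its validation loss
    print(f"epoch {epoch_counter}: validation loss {valid_loss:.4f}")

# hypothetical call, assuming a trainer instance named simclr
# simclr.train(callback=log_validation)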
Example #13
    def train(self):

        train_loader, valid_loader = self.dataset.get_data_loaders()
        print(
            f'The current dataset has {self.dataset.get_train_length()} items')

        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)

        if self.device == self.cuda_name and self.config['allow_multiple_gpu']:
            gpu_count = torch.cuda.device_count()
            if gpu_count > 1:
                print(
                    f'There are {gpu_count} GPUs with the current setup, so we will run on all the GPUs'
                )
                model = torch.nn.DataParallel(model)

        optimizer = torch.optim.Adam(model.parameters(),
                                     3e-4,
                                     weight_decay=eval(
                                         self.config['weight_decay']))

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model,
                                              optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)

        model_checkpoints_folder = os.path.join(self.writer.log_dir,
                                                'checkpoints')

        # save config file
        _save_config_file(model_checkpoints_folder)

        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf

        for epoch_counter in range(self.config['epochs']):
            t1 = time.time()
            for (xis, xjs), _ in train_loader:
                optimizer.zero_grad()

                xis = xis.to(self.device)
                xjs = xjs.to(self.device)

                loss = self._step(model, xis, xjs, n_iter)

                if n_iter % self.config['log_every_n_steps'] == 0:
                    print(
                        f"Epoch {epoch_counter}. Loss = {loss}. Time: {time.strftime('%c', time.localtime())}."
                    )
                    self.writer.add_scalar('train_loss',
                                           loss,
                                           global_step=n_iter)

                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                optimizer.step()
                n_iter += 1

            # validate the model if requested
            if epoch_counter % self.config['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(model, valid_loader)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(
                        model.state_dict(),
                        os.path.join(model_checkpoints_folder, 'model.pth'))
                    time_for_epoch = int(time.time() - t1)
                    print(f"===\n \
                            Epoch {epoch_counter}. Time for previous epoch: {time_for_epoch} seconds. Time to go: {((self.config['epochs'] - epoch_counter)*time_for_epoch)/60} minutes. Validation loss: {valid_loss}. Best valid loss: {best_valid_loss}\
                          \n===")

                self.writer.add_scalar('validation_loss',
                                       valid_loss,
                                       global_step=valid_n_iter)
                valid_n_iter += 1

            # warmup for the first 10 epochs
            if epoch_counter >= 10:
                scheduler.step()
            self.writer.add_scalar('cosine_lr_decay',
                                   scheduler.get_lr()[0],
                                   global_step=n_iter)
Example #14
def main():
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        print("Using GPU")
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1

    dataset = ContrastiveLearningDataset(args.data)

    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True,
                                               worker_init_fn=worker_init_fn)

    if args.dataset_name == "mnist":
        in_channels = 1
    else:
        in_channels = 3

    model = ResNetSimCLR(base_model=args.arch,
                         out_dim=args.out_dim,
                         in_channels=in_channels)

    if args.model_path is not None:
        checkpoint = torch.load(args.model_path, map_location=args.device)
        model.load_state_dict(checkpoint['state_dict'])

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader), eta_min=0, last_epoch=-1)

    head_dataset = HeadDataset(args.data)
    train_head_dataset = head_dataset.get_dataset(args.dataset_name,
                                                  train=True,
                                                  split="train")
    test_head_dataset = head_dataset.get_dataset(args.dataset_name,
                                                 train=False,
                                                 split="test")

    args.num_classes = head_dataset.get_num_classes(args.dataset_name)

    train_head_loader = torch.utils.data.DataLoader(train_head_dataset,
                                                    batch_size=args.batch_size,
                                                    shuffle=True,
                                                    num_workers=args.workers,
                                                    pin_memory=True,
                                                    drop_last=True)

    test_head_loader = torch.utils.data.DataLoader(test_head_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   drop_last=True)

    #  It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        if not (args.head_only or args.tsne_only):
            simclr = SimCLR(model=model,
                            optimizer=optimizer,
                            scheduler=scheduler,
                            args=args)
            model = simclr.train(train_loader)
        model.eval()
        if not args.tsne_only:
            headsimclr = SimCLRHead(model=model, args=args)
            headsimclr.train(train_head_loader, test_head_loader)

        tsne_plot = TSNE_project(model, test_head_loader, args)
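
Example #14 passes a worker_init_fn to the DataLoader; such a function usually gives every worker process a distinct but reproducible seed. A sketch following the standard PyTorch recipe (an assumption, not the snippet's own helper):

import random

import numpy as np
import torch


def worker_init_fn(worker_id):
    # derive each worker's NumPy/random seed from the base seed PyTorch assigned to it
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)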