# Example 1 (a trainer's __init__ excerpt; assumes `os`, `torch`, and project
# modules such as Saver, make_data_loader, build_model, SegmentationLosses,
# Evaluator, and LR_Scheduler are imported in the full file)
    def __init__(self, args):
        self.args = args

        self.saver = Saver(args)
        self.saver.save_experiment_config()

        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)

        # self.model = OCRNet(self.nclass)
        # build_model's positional arguments are project-specific (presumably
        # the number of classes, a base-channel spec, and an architecture code).
        self.model = build_model(2, [32, 32], '44330020')
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=args.lr,
                                         momentum=args.momentum,
                                         weight_decay=args.weight_decay,
                                         nesterov=args.nesterov)
        if args.use_balanced_weights:
            weight = torch.tensor([0.2, 0.8], dtype=torch.float32)
        else:
            weight = None
        self.criterion = SegmentationLosses(
            weight, cuda=args.cuda).build_loss(mode=args.loss_type)

        self.evaluator = Evaluator(self.nclass)
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.train_loader))

        if args.cuda:
            self.model = self.model.cuda()

        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            # map_location keeps resume working when the checkpoint was saved
            # on a different device than the current one.
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch']
            # The model is never wrapped in DataParallel in this example, so
            # there is no `.module` attribute; load into the model directly.
            self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

        if args.ft:
            args.start_epoch = 0
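
# A minimal sketch of the checkpoint format Example 1's resume branch expects;
# the save call and file name are assumptions (the project's Saver presumably
# handles this), but the four keys mirror the loads above.
torch.save({
    'epoch': epoch + 1,                        # epoch to resume from
    'state_dict': self.model.state_dict(),     # model weights
    'optimizer': self.optimizer.state_dict(),  # optimizer state
    'best_pred': self.best_pred,               # best validation metric so far
}, 'checkpoint.pth.tar')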
# Example 2
    def __init__(self, PATH):
        # Define Dataloader
        # word_vector = gensim.models.KeyedVectors.load_word2vec_format(conf.word_vector_dir+'model.vec', binary=False)
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            16)

        print(pycolor.CYAN + "  Define Model." + pycolor.END)
        # Define network (****Change****)
        model = Modeling(embedding_dim=conf.embedding_dim,
                         c_out=conf.num_class,
                         c_hidden=conf.hidden_channel,
                         hidden_layer=conf.hidden_layer)

        # map_location allows loading a GPU-saved checkpoint on any device.
        model_state = torch.load(PATH, map_location='cpu')
        state_dict = model_state["state_dict"]
        print("epoch: {}".format(model_state["epoch"]))

        # Create a new OrderedDict without the `module.` prefix that
        # DataParallel adds to every key when a wrapped model is saved.
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] if k.startswith('module.') else k  # strip `module.` safely
            new_state_dict[name] = v
        # load params
        model.load_state_dict(new_state_dict)

        # Define Criterion
        self.criterion = nn.CrossEntropyLoss(reduction="none")

        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)

        # Using cuda (guarded by availability instead of a hard-coded `if True:`)
        if torch.cuda.is_available():
            # model = torch.nn.DataParallel(model, device_ids=[0])
            model = model.cuda()

        self.model = model
        self.predicts = []
        self.answers = []
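
# A minimal sketch of the same `module.`-prefix stripping using a built-in
# helper (consume_prefix_in_state_dict_if_present, available in PyTorch 1.9+);
# PATH and model refer to the names in Example 2 above.
import torch
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

state_dict = torch.load(PATH, map_location='cpu')["state_dict"]
consume_prefix_in_state_dict_if_present(state_dict, 'module.')  # edits in place
model.load_state_dict(state_dict)

# Example 3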
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.writer = SummaryWriter(log_dir=self.saver.experiment_dir)

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.nclass = make_data_loader(
            args, **kwargs)

        model = None
        # Define network
        if self.args.dataset != 'Multi':
            model = ClassesNet(backbone=self.args.backbone,
                               num_classes=self.nclass,
                               pretrained=True)
            if self.args.dataset == 'Classes':
                print("Training ClassesNet")
            else:
                print("Training SpeciesNet")
        else:
            model = MultiNet(backbone=self.args.backbone,
                             num_classes=self.nclass,
                             pretrained=True)
            print("Training MultiNet")

        self.model = model

        train_params = [{'params': model.get_params()}]
        # Define Optimizer (note: the `nesterov` flag is reused here to toggle
        # Adam's AMSGrad variant, since Adam has no Nesterov option)
        self.optimizer = torch.optim.Adam(train_params,
                                          self.args.learn_rate,
                                          weight_decay=args.weight_decay,
                                          amsgrad=args.nesterov)

        # Define Criterion (`size_average` is deprecated; use `reduction`)
        self.criterion = nn.CrossEntropyLoss(reduction='mean')

        # Define lr scheduler (kept on self so it can be stepped each epoch;
        # step_size=1 with gamma=0.1 decays the lr tenfold every epoch)
        self.scheduler = lr_scheduler.StepLR(self.optimizer,
                                             step_size=1,
                                             gamma=0.1)

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0
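
# A minimal sketch of how Example 3's StepLR schedule would be advanced in the
# epoch loop; train_one_epoch is a hypothetical name. With step_size=1 and
# gamma=0.1 the learning rate shrinks tenfold every epoch, which is a very
# aggressive decay.
for epoch in range(args.start_epoch, args.epochs):
    train_one_epoch(self.model, self.train_loader, self.optimizer)  # hypothetical
    self.scheduler.step()  # decay the lr once per epoch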
# Example 4
    def __init__(self, args):
        super(Trainer, self).__init__()
        self.args = args
        self.vis = Visualizer(env=args.checkname)
        self.saver = Checkpointer(args.checkname,
                                  args.saver_path,
                                  overwrite=False,
                                  verbose=True,
                                  timestamp=True,
                                  max_queue=args.max_save)

        self.model = LinkCrack()

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            self.model = self.model.cuda()
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

        if args.pretrained_model:
            self.model.load_state_dict(
                self.saver.load(self.args.pretrained_model, multi_gpu=True))
            self.vis.log('load checkpoint: %s' % self.args.pretrained_model,
                         'train info')

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)

        if args.use_adam:
            self.optimizer = torch.optim.Adam(self.model.parameters(),
                                              lr=args.lr,
                                              weight_decay=args.weight_decay)
        else:
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             lr=args.lr,
                                             momentum=args.momentum,
                                             weight_decay=args.weight_decay)

        self.iter_counter = 0

        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.train_loader))

        # -------------------- Loss --------------------- #

        # pos_weight tensors are created on self.device so the losses also
        # work when CUDA is unavailable (torch.cuda.FloatTensor would fail
        # on a CPU-only machine).
        self.mask_loss = nn.BCEWithLogitsLoss(
            reduction='mean',
            pos_weight=torch.tensor([args.pos_pixel_weight],
                                    dtype=torch.float32, device=self.device))
        self.connected_loss = nn.BCEWithLogitsLoss(
            reduction='mean',
            pos_weight=torch.tensor([args.pos_link_weight],
                                    dtype=torch.float32, device=self.device))

        self.loss_weight = args.loss_weight

        # logger
        self.log_loss = {}
        self.log_acc = {}
        self.save_pos_acc = -1
        self.save_acc = -1
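
# A minimal sketch of how Example 4's two loss heads might be combined in a
# training step; the method and tensor names (mask_logits, link_logits,
# mask_gt, link_gt) are hypothetical, but the weighting follows loss_weight
# as defined above.
    def compute_loss(self, mask_logits, link_logits, mask_gt, link_gt):
        mask_term = self.mask_loss(mask_logits, mask_gt)        # pixel branch
        link_term = self.connected_loss(link_logits, link_gt)   # connectivity branch
        return mask_term + self.loss_weight * link_term         # weighted sum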
# Example 5
    def __init__(self, batch_size=32, optimizer_name="Adam", lr=1e-3, weight_decay=1e-5,
                 epochs=200, model_name="model01", gpu_ids=None, resume=None, tqdm=None):
        """
        args:
            batch_size = (int) batch_size of training and validation
            lr = (float) learning rate of optimization
            weight_decay = (float) weight decay of optimization
            epochs = (int) The number of epochs of training
            model_name = (string) The name of training model. Will be folder name.
            gpu_ids = (List) List of gpu_ids. (e.g. gpu_ids = [0, 1]). Use CPU, if it is None. 
            resume = (Dict) Dict of some settings. (resume = {"checkpoint_path":PATH_of_checkpoint, "fine_tuning":True or False}). 
                     Learn from scratch, if it is None.
            tqdm = (tqdm Object) progress bar object. Set your tqdm please.
                   Don't view progress bar, if it is None.
        """
        # Set params
        self.batch_size = batch_size
        self.epochs = epochs
        self.start_epoch = 0
        self.use_cuda = (gpu_ids is not None) and torch.cuda.is_available()
        self.tqdm = tqdm
        self.use_tqdm = tqdm is not None
        # ------------------------- #
        # Define utils. (No need to change.)
        """
        These are project modules; you should not need to change them.

        Saver: Saves model weights. / <utils.saver.Saver()>
        TensorboardSummary: Writes tensorboard files. / <utils.summaries.TensorboardSummary()>
        Evaluator: Computes metrics (e.g. accuracy). / <utils.metrics.Evaluator()>
        """
        ## ***Define Saver***
        self.saver = Saver(model_name, lr, epochs)
        self.saver.save_experiment_config()
        
        ## ***Define Tensorboard Summary***
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        
        # ------------------------- #
        # Define training components. (You have to change these!)
        """
        These are the important settings for training; adapt them to your task.

        make_data_loader: Creates the <Dataloader>s. / <dataloader.__init__>
        Modeling: Define your model here. / <modeling.modeling.Modeling()>
        Evaluator: Define your evaluator here. / <utils.metrics.Evaluator()>
        Optimizer: Define your optimizer here. / <utils.optimizer.Optimizer()>
        Loss: Define your loss function here. / <utils.loss.Loss()>
        """
        ## ***Define Dataloader***
        self.train_loader, self.val_loader, self.test_loader, self.num_classes = make_data_loader(batch_size)
        
        ## ***Define Your Model***
        self.model = Modeling(self.num_classes)
        
        ## ***Define Evaluator***
        self.evaluator = Evaluator(self.num_classes)
        
        ## ***Define Optimizer***
        self.optimizer = Optimizer(self.model.parameters(), optimizer_name=optimizer_name, lr=lr, weight_decay=weight_decay)
        
        ## ***Define Loss***
        self.criterion = Loss()
        
        # ------------------------- #
        # Some settings
        """
        You don't have to touch bellow code.
        
        Using cuda: Enable to use cuda if you want.
        Resuming checkpoint: You can resume training if you want.
        """
        ## ***Using cuda***
        if self.use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=gpu_ids).cuda()

        ## ***Resuming checkpoint***
        """You can ignore bellow code."""
        self.best_pred = 0.0
        if resume is not None:
            if not os.path.isfile(resume["checkpoint_path"]):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    resume["checkpoint_path"]))
            checkpoint = torch.load(resume["checkpoint_path"],
                                    map_location='cpu')
            self.start_epoch = checkpoint['epoch']
            if self.use_cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if resume["fine_tuning"]:
                # resume params of optimizer, if run fine tuning.
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                self.start_epoch = 0
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(resume["checkpoint_path"], checkpoint['epoch']))