# Example #1
    def __init__(self):
        """
        Build the training module: feature extractor, trainable model,
        optimizer, loss functions, data loader and device placement.

        :param self.train_frames: how many frames are used for training
        :param self.num_of_vid: number of videos
        :param self.extractor: pretrained model for feature extraction
        :param self.G: the model being trained
        :param self.optimizerG: optimizer (default: Adam)
        :param self.train_data: training data loader
        :param self.device: gpu or cpu device (default: gpu)
        """
        self.train_frames = args.train_frames
        self.num_of_vid = args.num_of_vid
        print("===> Building model")

        # Build the pretrained 3D-ResNet feature extractor.
        self.extractor = resnet101(
            num_classes=400,
            shortcut_type='B',
            cardinality=32,
            sample_size=args.crop_size,
            sample_duration=args.train_frames,
        )
        # Load the pretrained weights.
        # The extractor must always be used in eval mode!
        pretrained_state = get_pretrain_weight(pretrain_path, self.extractor)
        self.extractor.load_state_dict(pretrained_state)

        # Trainable network.
        self.in_channel = args.in_channel
        self.G = BaseNet(self.in_channel * self.num_of_vid,
                         self.train_frames, self.num_of_vid)
        self.num_stage = args.num_stage

        # Optimizer for G.
        self.optimizerG = optim.Adam(self.G.parameters(), lr=args.lr,
                                     betas=(0.9, 0.999), eps=1e-8)

        # Loss functions.
        self.CE_loss = nn.CrossEntropyLoss()
        self.L1_loss = nn.L1Loss()
        self.L2_loss = nn.MSELoss()
        self.Softmax = nn.Softmax()

        # Data loader and devices.
        self.train_data = train_data_loader
        self.device0 = device0
        self.device1 = device1

        # Training bookkeeping.
        self.epochs = args.epochs
        self.avg_G_loss_arr = []
        self.checkpoint = args.checkpoint
        self.checkpoint_set()
        self.lr_decay = args.lr_decay

        # Move models to GPU when available (extractor and G may live on
        # different devices to split memory load).
        if torch.cuda.is_available():
            self.extractor.to(self.device0)
            self.G.cuda(self.device1)
class TrainModule(object):
    """Train a BaseNet classifier on features produced by a frozen,
    pretrained 3D ResNet-101 video feature extractor.

    :param self.train_frames: how many frames are used for training
    :param self.num_of_vid: number of videos per training sample
    :param self.extractor: pretrained model for feature extraction (frozen,
        always run in eval mode)
    :param self.G: the model being trained (BaseNet)
    :param self.optimizerG: optimizer for G (default: Adam)
    :param self.train_data: training data loader
    :param self.device0 / self.device1: devices for the extractor and G
        respectively (the two models may sit on different GPUs)
    """

    def __init__(self):
        self.train_frames = args.train_frames
        self.num_of_vid = args.num_of_vid
        print("===> Building model")
        # Model setting
        # pretrained model setting
        self.extractor = resnet101(num_classes=400,
                                   shortcut_type='B',
                                   cardinality=32,
                                   sample_size=args.crop_size,
                                   sample_duration=args.train_frames)

        # load pretrained weights
        # NOTE: the extractor must always be used in eval mode (see train())
        weight = get_pretrain_weight(pretrain_path, self.extractor)
        self.extractor.load_state_dict(weight)

        self.in_channel = args.in_channel
        self.G = BaseNet(self.in_channel * self.num_of_vid,
                         self.train_frames, self.num_of_vid)
        self.num_stage = args.num_stage

        # optimizer
        self.optimizerG = optim.Adam(self.G.parameters(), lr=args.lr,
                                     betas=(0.9, 0.999), eps=1e-8)
        # losses
        self.CE_loss = nn.CrossEntropyLoss()
        self.L1_loss = nn.L1Loss()
        self.L2_loss = nn.MSELoss()
        # NOTE(review): nn.Softmax() without dim= is deprecated; specify the
        # dim explicitly wherever this is applied (unused in the visible code)
        self.Softmax = nn.Softmax()
        # data
        self.train_data = train_data_loader
        # gpu
        self.device0 = device0
        self.device1 = device1

        # training
        self.epochs = args.epochs
        self.avg_G_loss_arr = []
        self.checkpoint = args.checkpoint
        self.checkpoint_set()
        self.lr_decay = args.lr_decay

        # cuda
        if torch.cuda.is_available():
            self.extractor.to(self.device0)
            self.G.cuda(self.device1)

    def train(self):
        """Run the full training loop for ``self.epochs`` epochs.

        Per batch: extract features for each of the ``num_of_vid`` videos
        with the frozen extractor, concatenate them, run G, and optimize a
        per-frame cross-entropy loss summed over frames. Handles LR decay
        and checkpoint/graph snapshots at the configured intervals.
        """
        # pretrained network stays in eval mode (it is a frozen extractor)
        self.extractor.eval()
        # network being optimized runs in training mode
        self.G.train()

        for epoch in range(self.epochs):
            epoch_G_loss = 0.0
            # 'batch_idx' instead of 'iter'/'input' to avoid shadowing builtins
            for batch_idx, batch in enumerate(self.train_data, 1):
                # move the data from cpu to the proper gpu devices
                inputs, label = batch[0].to(self.device0), batch[1].to(self.device1)

                # The input tensor holds num_of_vid videos; feed each video
                # through the pretrained extractor and concatenate features.
                # BUGFIX: the loop previously re-extracted video 0 on every
                # iteration (inputs[:, 0, ...]); it must index video i so
                # videos 1..num_of_vid-1 actually contribute features.
                features = self.extractor(inputs[:, 0, :, :, :, :])
                for i in range(1, self.num_of_vid):
                    features = torch.cat(
                        (features, self.extractor(inputs[:, i, :, :, :, :])),
                        dim=1)

                # G lives on device1; move the extracted features over
                features = features.to(self.device1)

                self.optimizerG.zero_grad()
                predict = self.G(features)

                # per-frame cross-entropy, summed over all frames
                G_loss = 0
                for i in range(self.train_frames):
                    G_loss += self.CE_loss(predict[:, i, :], label[:, i])

                G_loss.backward()
                self.optimizerG.step()

                # accumulate as a plain float (.item()) rather than via .data,
                # so no tensor/graph references are kept alive across batches
                epoch_G_loss += G_loss.item()
                print("===> Epoch[{}]({}/{}): G_Loss: {:.4f} ".
                        format(epoch + 1, batch_idx, len(self.train_data),
                               G_loss.item()))

                # release cached gpu memory between iterations
                torch.cuda.empty_cache()

            avg_G_loss = epoch_G_loss / len(self.train_data)
            self.avg_G_loss_arr.append(avg_G_loss)

            # learning rate decay: halve the LR every lr_decay epochs
            if (epoch + 1) % (self.lr_decay) == 0:
                for param_group in self.optimizerG.param_groups:
                    param_group['lr'] /= 2.0
                print('G: Learning rate decay: lr={}'.format(
                    self.optimizerG.param_groups[0]['lr']))

            # checkpoint snapshot
            if (epoch + 1) % self.checkpoint == 0:
                # save checkpoint
                check_name = join(self.save_check_dir,
                                  'epoch_' + str(epoch + 1) + 'checkpoint.pkl')
                torch.save(self.G, check_name)
                # save the loss-per-epoch graph alongside the checkpoint
                check_g_name = join(self.save_check_dir,
                                    'epoch_' + str(epoch + 1) + '.png')
                make_graph(np.array(range(epoch + 1)),
                           np.array(self.avg_G_loss_arr),
                           self.save_mname, check_g_name)

    def checkpoint_set(self):
        """Set up the checkpoint directory and CSV loss logger.

        :param self.save_mname: saved model name used in each checkpoint
        :param self.log_dir: location of the checkpoint-saving folder
        :param self.save_check_dir: directory where checkpoints are written
        :param self.CSVlogger: CSV logger for tracking the loss
        """
        # model save name
        self.save_mname = args.save_model_name
        # checkpoint root
        make_dirs(log_path)
        self.log_dir = log_path + f'/{self.save_mname}'
        self.save_check_dir = make_dirs(self.log_dir)
        self.graph_dir = self.log_dir

        # CSV logging system
        self.CSVlogger = LogCSV(log_dir=self.log_dir + f"/{self.save_mname}_log.csv",
                                header=['epoch', 'avg_G_Loss', 'accuracy'])