Code Example #1
    def __init__(self, options, path):
        self.options = options
        self.path = path
        self.device = options['device']

        print('Starting to prepare network and data...')

        # Build the selected backbone (via self._net_choice) and wrap it for multi-GPU training.
        self.net = nn.DataParallel(self._net_choice(
            self.options['net_choice'])).to(self.device)
        #self.net.load_state_dict(torch.load('/home/zhangyongshun/se_base_model/model_save/ResNet/backup/epoch120/ResNet50-finetune_fc_cub.pkl'))
        print('Network is as follows:')
        print(self.net)
        #print(self.net.state_dict())
        # Standard fine-tuning setup: cross-entropy loss, SGD, and a step LR schedule.
        self.criterion = nn.CrossEntropyLoss()
        self.solver = torch.optim.SGD(
            self.net.parameters(),
            lr=self.options['base_lr'],
            momentum=self.options['momentum'],
            weight_decay=self.options['weight_decay'])
        self.schedule = torch.optim.lr_scheduler.StepLR(self.solver,
                                                        step_size=30,
                                                        gamma=0.1)
        #self.schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(
        #    self.solver, mode='max', factor=0.1, patience=3, verbose=True, threshold=1e-4
        #)

        # ImageNet-style augmentation for training; resize + center crop for evaluation.
        train_transform_list = [
            transforms.RandomResizedCrop(self.options['img_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        test_transforms_list = [
            transforms.Resize(int(self.options['img_size'] / 0.875)),
            transforms.CenterCrop(self.options['img_size']),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        # CUB-200 dataset splits and their loaders; the test loader uses a fixed batch size of 16.
        train_data = cub200(self.path['data'],
                            train=True,
                            transform=transforms.Compose(train_transform_list))
        test_data = cub200(self.path['data'],
                           train=False,
                           transform=transforms.Compose(test_transforms_list))
        self.train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=self.options['batch_size'],
            shuffle=True,
            num_workers=4,
            pin_memory=True)
        self.test_loader = torch.utils.data.DataLoader(test_data,
                                                       batch_size=16,
                                                       shuffle=False,
                                                       num_workers=4,
                                                       pin_memory=True)
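The constructor above only reads a handful of keys from options and path. A minimal usage sketch follows; the class name Trainer, the concrete option values, and the data path are assumptions made for illustration and are not part of the snippet itself.

import torch

# Hypothetical configuration; only the dictionary keys are taken from the constructor above.
options = {
    'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    'net_choice': 'ResNet50',   # forwarded to self._net_choice(...); value is assumed
    'base_lr': 0.01,
    'momentum': 0.9,
    'weight_decay': 1e-4,
    'img_size': 448,
    'batch_size': 32,
}
path = {'data': './data/cub200'}  # assumed root directory handed to the cub200 dataset

trainer = Trainer(options, path)  # Trainer is an assumed name for the class defining this __init__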
Code Example #2
    def __init__(self, options, path):
        self.options = options
        self.path = path
        self.device = options['device']

        print('Starting to prepare network and data...')
        net_params = BCNN(is_all=True, num_classes=200)
        self.net = nn.DataParallel(net_params).to(self.device)
        if stage == "train_all":
            self.net.load_state_dict(torch.load('./models/saved/VGG/VGG_bilinear_wonorm_train_last_layer.pkl'))
            1
        else:
            for param in net_params.parameters():
                param.requires_grad = False
            for param in net_params.classifier.parameters():
                param.requires_grad = True
        print('Network is as follows:')
        print(self.net)
        #print(self.net.state_dict())
        self.criterion = nn.CrossEntropyLoss()
        if stage == "train_all":
            self.solver = torch.optim.SGD(self.net.parameters(), lr=self.options['base_lr'], momentum=self.options['momentum'], weight_decay=self.options['weight_decay'])
        else:
            self.solver = torch.optim.SGD(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.options['base_lr'], momentum=self.options['momentum'], weight_decay=self.options['weight_decay'])
        # self.schedule = torch.optim.lr_scheduler.StepLR(self.solver, step_size=50, gamma=0.25)
        if stage == "train_all":
            self.schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.solver, mode='max', factor=0.1, patience=5, verbose=True,
                threshold=1e-4)
        else:
            self.schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.solver, mode='max', factor=0.25, patience=5, verbose=True,
                threshold=1e-4)

        train_transform_list = [
            transforms.Resize(512),
            transforms.RandomCrop(448),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        test_transforms_list = [
            transforms.Resize(512),
            transforms.CenterCrop(self.options['img_size']),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        train_data = cub200(self.path['data'], train=True, transform=transforms.Compose(train_transform_list))
        test_data = cub200(self.path['data'], train=False, transform=transforms.Compose(test_transforms_list))
        self.train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=self.options['batch_size'], shuffle=True, num_workers=4, pin_memory=True
        )
        self.test_loader = torch.utils.data.DataLoader(
            test_data, batch_size=16, shuffle=False, num_workers=4, pin_memory=True
        )
Code Example #3
File: trainer.py  Project: abcbdf/resnet_finetune_cub
    def __init__(self, options, path):
        self.options = options
        self.path = path
        self.device = options['device']

        print('Starting to prepare network and data...')
        # print(self.device)
        # exit()
        self.net = self._net_choice(self.options['net_choice']).to(self.device)
        if self.options["test"]:
            load_data = torch.load('./model_save/ResNet/ResNet50.pkl', map_location = self.device)
            self.net.load_state_dict(load_data)
        print('Network is as follows:')
        print(self.net)
        #print(self.net.state_dict())
        self.criterion = nn.CrossEntropyLoss()
        self.solver = torch.optim.SGD(
            self.net.parameters(), lr=self.options['base_lr'], momentum=self.options['momentum'], weight_decay=self.options['weight_decay']
        )
        self.schedule = torch.optim.lr_scheduler.StepLR(self.solver, step_size=30, gamma=0.1)
        #self.schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(
        #    self.solver, mode='max', factor=0.1, patience=3, verbose=True, threshold=1e-4
        #)

        train_transform_list = [
            transforms.RandomResizedCrop(self.options['img_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        test_transforms_list = [
            transforms.Resize(int(self.options['img_size']/0.875)),
            transforms.CenterCrop(self.options['img_size']),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        ]
        if (self.options['dataset'] == "cub"):
            train_data = cub200(self.path['data'], train=True, transform=transforms.Compose(train_transform_list))
            test_data = cub200(self.path['data'], train=False, transform=transforms.Compose(test_transforms_list))
        elif (self.options['dataset'] == "flo"):
            print("flower dataset")
            train_data = flower(self.path['data'], train=True, transform=transforms.Compose(train_transform_list))
            test_data = flower(self.path['data'], train=False, transform=transforms.Compose(test_transforms_list))

        # exit()  # leftover debug call; commented out so the DataLoaders below are actually built
        self.train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=self.options['batch_size'], shuffle=True, num_workers=4, pin_memory=True
        )
        self.test_loader = torch.utils.data.DataLoader(
            test_data, batch_size=16, shuffle=False, num_workers=4, pin_memory=True
        )
        self.activation = {}
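The constructor ends by initializing self.activation as an empty dict, which suggests intermediate activations are captured elsewhere in the class via forward hooks. A hedged sketch of that common pattern is shown below; the method name and the hooked layer are assumptions, since the rest of the class is not included in the snippet.

    def _get_activation(self, name):
        # Returns a hook that stores the (detached) output of a module under the given key.
        def hook(module, inputs, output):
            self.activation[name] = output.detach()
        return hook

    # Example registration, e.g. at the end of __init__ (the layer name is an assumption):
    # self.net.layer4.register_forward_hook(self._get_activation('layer4'))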