def __init__(self, opt):
    super(DANN_resnet50, self).__init__()
    print('===============> DANN_resnet50 task! <===============')
    self.opt = opt

    #########################################
    #
    #          Feature Extractor
    #
    #########################################
    # Load ImageNet-pretrained weights, keeping only the keys that also
    # exist in our resnet50 definition (mismatched layers are dropped).
    pretrained_dict = torch.load('/media/b3-542/196AE2835A1F87B0/HeHai/Models/resnet50-19c8e357.pth')
    self.feature_extrator = network.resnet50()
    model_dict = self.feature_extrator.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    self.feature_extrator.load_state_dict(model_dict)

    #########################################
    #
    #          Label Classifier
    #
    #########################################
    self.class_classifier = nn.Sequential()
    self.class_classifier.add_module('c_fc', nn.Linear(2048 * 4, 31))  # opt.num_classes
    self.class_classifier.apply(utils.weights_init)

    #########################################
    #
    #          Domain Classifier
    #
    #########################################
    self.domain_classifier = nn.Sequential()
    self.domain_classifier.add_module('d_fc1', nn.Linear(2048 * 4, 1024))
    # BatchNorm1d here: the input is a 2-D (batch, features) tensor coming
    # out of a Linear layer, so BatchNorm2d would raise a dimension error.
    self.domain_classifier.add_module('d_bn1', nn.BatchNorm1d(1024))
    self.domain_classifier.add_module('d_relu1', nn.ReLU(True))
    self.domain_classifier.add_module('d_fc2', nn.Linear(1024, 256))
    self.domain_classifier.add_module('d_bn2', nn.BatchNorm1d(256))
    self.domain_classifier.add_module('d_relu2', nn.ReLU(True))
    self.domain_classifier.add_module('d_fc3', nn.Linear(256, 1))
    self.domain_classifier.add_module('d_sm', nn.Sigmoid())
    self.domain_classifier.apply(utils.weights_init)
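# A minimal sketch of the matching forward pass. DANN trains the feature
# extractor adversarially against the domain classifier through a gradient
# reversal layer; the GradReverse function and the alpha argument below are
# illustrative assumptions, not part of the original snippet.
from torch.autograd import Function

class GradReverse(Function):
    # Identity in the forward pass; scales the gradient by -alpha on the way
    # back, which pushes the extractor toward domain-invariant features.
    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg() * ctx.alpha, None

def forward(self, x, alpha=1.0):
    feat = self.feature_extrator(x)
    feat = feat.view(feat.size(0), -1)       # flatten to (batch, 2048 * 4)
    class_out = self.class_classifier(feat)  # label logits
    domain_out = self.domain_classifier(GradReverse.apply(feat, alpha))
    return class_out, domain_out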
def __init__(self):
    super(MySalEncoder, self).__init__()
    # feat_stride maps image-space ROI coordinates onto the feature map
    # produced by the resnet backbone (which downsamples the input).
    self.feat_stride = 3.5  # 1.75
    model_path = './models/'
    resnet = resnet50(pretrained=True, modelpath=model_path, num_classes=1000)
    # The extractor (resnet) need not be updated, so only roi_head in the
    # encoder is trained. In the demo, "encoder_optimizer" inserts only the
    # parameters of self.roi_head into the computation graph:
    #   encoder_optimizer = optim.Adam(EncoderModel.roi_head.parameters(), lr=0.001,
    #                                  betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    self.extractor = nn.Sequential(resnet)
    self.dconv = nn.ConvTranspose2d(1024, 1024, kernel_size=3, stride=2)
    self.relu = nn.ReLU(inplace=True)
    self.roi_head = ROIHead(roi_size=(3, 3), feat_stride=self.feat_stride)
    # Move all submodules to the GPU.
    self.extractor.cuda()
    self.dconv.cuda()
    self.roi_head.cuda()
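# Spelled out, the optimizer described in the comments above would update the
# ROI head only. Freezing the extractor explicitly is an added assumption
# (the original relies solely on which parameters are handed to the
# optimizer), but it makes the intent visible:
import torch.optim as optim

for p in EncoderModel.extractor.parameters():
    p.requires_grad = False  # keep the resnet backbone fixed

encoder_optimizer = optim.Adam(EncoderModel.roi_head.parameters(), lr=0.001,
                               betas=(0.9, 0.999), eps=1e-08, weight_decay=0)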
])
data = data_loader.read_data(opts, folder_names[0])
train_x, test_x = data.train_test_split()

train_data = data_loader.CelebA_DataLoader(train_x, folder_names[0], transform=transform,
                                           attribute_file=attribute_file,
                                           size=opts.resize, randomcrop=opts.image_shape)
trainloader = DataLoader(train_data, batch_size=opts.batch, shuffle=True, num_workers=4)

test_data = data_loader.CelebA_DataLoader(test_x, folder_names[0], transform=transform,
                                          attribute_file=attribute_file,
                                          size=opts.resize, randomcrop=opts.image_shape)
# Evaluate on the held-out split; no shuffling needed at test time.
testloader = DataLoader(test_data, batch_size=256, shuffle=False, num_workers=8)

from network import resnet50
from train import GAN_Trainer
from test import Gan_Tester

'''Discriminator'''
D = resnet50().to(device)

'''Optimizers'''
import torch.optim as optim
D_optim = optim.Adam(D.parameters(), lr=opts.lr, betas=(opts.beta1, opts.beta2))

'''run training'''
trainer = GAN_Trainer(opts, D, D_optim, trainloader)
trainer.train()

tester = Gan_Tester(opts, D, testloader, checkpoint='./model/checkpoint_-1.pth')
tester.test()
num_workers = 2

# Load the data.
print("Loading dataset...")
test_data = Hand(data_root, train=False)
train_data = Hand(data_root, train=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
print('test dataset len: {}'.format(len(test_dataloader.dataset)))
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
print('train dataset len: {}'.format(len(train_dataloader.dataset)))

# Models.
# model = resnet34(pretrained=False, num_classes=1000)   # batch_size=120, 1 GPU, memory < 7000M
# model.fc = nn.Linear(512, 6)
# model = resnet101(pretrained=False, num_classes=1000)  # batch_size=60, 1 GPU, memory > 9000M
# model.fc = nn.Linear(512 * 4, 6)
model = resnet50(pretrained=False, num_classes=1000)
model.fc = nn.Linear(512 * 4, 6)  # resnet50's final feature size is 512 * 4 = 2048

# Test.
tester = Tester(model, params, test_dataloader, train_dataloader)
tester.accuracy_for_testdata()
tester.accuracy_for_traindata()
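# Hypothetical sketch of the accuracy pass that Tester presumably performs
# (Tester's internals are not shown; model_accuracy below is an illustrative
# helper, not the repository's API):
import torch

def model_accuracy(model, dataloader, device='cuda'):
    model.eval().to(device)
    correct = total = 0
    with torch.no_grad():
        for images, labels in dataloader:
            preds = model(images.to(device)).argmax(dim=1)
            correct += (preds == labels.to(device)).sum().item()
            total += labels.size(0)
    return correct / total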
def build_net(self):
    self.Retinanet_model = resnet50(cfgs.ClsNum)
    self.Detector = InferDetector()
    piecewise decay to the initial learning rate
    '''
    batch_size = learning_params['batch_size']
    step_per_epoch = int(math.ceil(read_params['input_num'] / batch_size))
    learning_rate = learning_params['learning_rate']  # base lr, needed for the decay values below
    boundaries = [i * step_per_epoch for i in learning_params['epochs']]
    values = [i * learning_rate for i in learning_params['lr_decay']]
    learning_rate = fluid.layers.piecewise_decay(boundaries, values)
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=learning_rate,
                                              parameter_list=parameter_list)
    return optimizer

with fluid.dygraph.guard(place):
    resnet = resnet50(False, num_classes=5)
    optimizer = get_adam_optimizer(resnet.parameters())
    if learning_params['pretrained']:
        params, _ = fluid.load_dygraph(learning_params['pretrain_params_path'])
        resnet.set_dict(params)

with fluid.dygraph.guard(place):
    resnet.train()
    train_list = os.path.join(read_params['data_dir'], read_params['train_list'])
    train_reader = fluid.io.batch(custom_img_reader(train_list, mode='train'),
                                  batch_size=learning_params['batch_size'])
    train_loader = fluid.io.DataLoader.from_generator(capacity=3, return_list=True,
                                                      use_multiprocess=False)
    train_loader.set_sample_list_generator(train_reader, places=place)
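# A minimal epoch loop for this dygraph setup might look as follows;
# 'num_epochs' and the use of softmax_with_cross_entropy are assumptions,
# not taken from the original snippet.
with fluid.dygraph.guard(place):
    for epoch in range(learning_params['num_epochs']):  # assumed config key
        for batch_id, (img, label) in enumerate(train_loader()):
            logits = resnet(img)
            label = fluid.layers.reshape(label, [-1, 1])  # int64 labels, shape (N, 1)
            loss = fluid.layers.softmax_with_cross_entropy(logits, label)
            avg_loss = fluid.layers.mean(loss)
            avg_loss.backward()
            optimizer.minimize(avg_loss)
            resnet.clear_gradients()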