Example #1
    def __init__(self, class_num, bottleneck_dim=256, type="linear"):
        super(feat_classifier_twin, self).__init__()
        self.type = type
        if type == 'wn':
            self.fc_a = weightNorm(nn.Linear(bottleneck_dim, class_num),
                                   name="weight")
            self.fc_b = weightNorm(nn.Linear(bottleneck_dim, class_num),
                                   name="weight")
        else:
            self.fc_a = nn.Linear(bottleneck_dim, class_num)
            self.fc_b = nn.Linear(bottleneck_dim, class_num)

        self.fc_a.apply(init_weights)
        self.fc_b.apply(init_weights)
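Across these examples, weightNorm is PyTorch's torch.nn.utils.weight_norm, which reparameterizes a layer's weight as w = g * v / ||v||. The snippet omits the import, the init_weights helper, and the forward pass; a minimal sketch of all three, where init_weights and forward are assumptions rather than the original code:

    # Common import, aliased to match the snippets on this page.
    import torch.nn as nn
    from torch.nn.utils import weight_norm as weightNorm

    def init_weights(m):
        # Hypothetical initializer consistent with the .apply(init_weights)
        # calls above; the project's real helper may differ.
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    def forward(self, x):
        # Twin heads: two independent classifiers over the same features.
        return self.fc_a(x), self.fc_b(x)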
Example #2
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = weightNorm(
            nn.Conv2d(in_planes, planes, kernel_size=1, bias=True))
        self.conv2 = weightNorm(
            nn.Conv2d(planes,
                      planes,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias=True))
        self.conv3 = weightNorm(
            nn.Conv2d(planes,
                      self.expansion * planes,
                      kernel_size=1,
                      bias=True))
        self.relu_1 = TReLU()
        self.relu_2 = TReLU()
        self.relu_3 = TReLU()

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                weightNorm(
                    nn.Conv2d(in_planes,
                              self.expansion * planes,
                              kernel_size=1,
                              stride=stride,
                              bias=True)), )
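The block's forward is not shown; a plausible BN-free residual forward, assuming TReLU is a learnable-threshold ReLU and that expansion is the usual Bottleneck class attribute (4):

    def forward(self, x):
        # Weight-normalized convs stand in for conv+BN; TReLU supplies the
        # nonlinearity, including after the residual addition.
        out = self.relu_1(self.conv1(x))
        out = self.relu_2(self.conv2(out))
        out = self.conv3(out)
        out = out + self.shortcut(x)
        return self.relu_3(out)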
Example #3
 def __init__(self):
     super(Net, self).__init__()
     ### we use weight normalization after each convolution and linear transform
     self.conv1 = weightNorm(nn.Conv2d(3, 6, 5), name="weight")
     # after wrapping, self.conv1._parameters holds 'weight_g' and 'weight_v' in place of 'weight'
     self.pool = nn.MaxPool2d(2, 2)
     self.conv2 = weightNorm(nn.Conv2d(6, 16, 5), name="weight")
     self.fc1 = weightNorm(nn.Linear(16 * 5 * 5, 120), name="weight")
     self.fc2 = weightNorm(nn.Linear(120, 84), name="weight")
     self.fc3 = weightNorm(nn.Linear(84, 10), name="weight")
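A minimal forward for this LeNet-style net, assuming the standard pool/ReLU/flatten pattern of the PyTorch CIFAR-10 tutorial it mirrors (weight norm changes the parameterization, not the forward math):

    import torch.nn.functional as F

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # 3x32x32 -> 6x14x14
        x = self.pool(F.relu(self.conv2(x)))  # -> 16x5x5
        x = x.view(-1, 16 * 5 * 5)            # flatten
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)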
Example #4
    def __init__(self):
        super(Discriminator, self).__init__()

        self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
        self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
        self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
        self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
        self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0))
        self.relu0 = TReLU()
        self.relu1 = TReLU()
        self.relu2 = TReLU()
        self.relu3 = TReLU()
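A plausible forward for this critic: each strided, weight-normalized conv halves the resolution, and conv4 maps the features to a one-channel score map (how that map is pooled afterwards is not shown):

    def forward(self, x):
        x = self.relu0(self.conv0(x))
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        x = self.relu3(self.conv3(x))
        return self.conv4(x)  # per-patch critic scores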
Example #5
    def __init__(self,
                 state_size,
                 action_size,
                 seed,
                 fc1_units=64,
                 fc2_units=64):
        super(QNetworkWN, self).__init__()
        self.seed = torch.manual_seed(seed)

        self.fc1 = weightNorm(nn.Linear(state_size, fc1_units), name="weight")
        self.fc2 = weightNorm(nn.Linear(fc1_units, fc2_units), name="weight")
        self.fc3 = weightNorm(nn.Linear(fc2_units, action_size), name="weight")
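The matching forward is a plain two-hidden-layer MLP; weight norm only reparameterizes each linear layer, so the computation is the usual one:

    import torch.nn.functional as F

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        return self.fc3(x)  # one Q-value per action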
Example #6
    def __init__(self, args):
        super(Discriminator_ac_wgan, self).__init__()

        self.conv0 = weightNorm(nn.Conv2d(3, 16, 5, 2, 2))
        self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
        self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
        self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
        self.conv4 = weightNorm(nn.Conv2d(128, 1, 5, 2, 2))

        self.relu0 = TReLU()
        self.relu1 = TReLU()
        self.relu2 = TReLU()
        self.relu3 = TReLU()

        self.aux_layer = nn.Linear(128 * 8 * 8, args.num_class)
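A hedged sketch of the two-headed forward: the conv stack feeds both the critic score and the auxiliary classifier. The 128 * 8 * 8 flatten implies 128x128 inputs, which is an assumption here:

    def forward(self, x):
        x = self.relu0(self.conv0(x))
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        feat = self.relu3(self.conv3(x))      # 128 x 8 x 8 for 128x128 input
        validity = self.conv4(feat)           # WGAN critic score map
        label = self.aux_layer(feat.view(feat.size(0), -1))  # class logits
        return validity, label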
Example #7
    def __init__(self, num_classes=10, num_channels=3):
        super(Classifier_DTN, self).__init__()

        self.conv_layers = nn.Sequential(
            nn.Conv2d(in_channels=num_channels,
                      out_channels=64,
                      kernel_size=5,
                      stride=2,
                      padding=2), nn.BatchNorm2d(64), nn.Dropout2d(0.1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=5,
                      stride=2,
                      padding=2), nn.BatchNorm2d(128), nn.Dropout2d(0.3),
            nn.ReLU(),
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=5,
                      stride=2,
                      padding=2), nn.BatchNorm2d(256), nn.Dropout2d(0.5),
            nn.ReLU())

        self.fc1 = nn.Linear(in_features=256 * 4 * 4, out_features=256)
        self.drop = nn.Dropout(0.5)
        self.bn = nn.BatchNorm1d(256)
        self.fc2 = weightNorm(
            nn.Linear(in_features=256, out_features=num_classes))

        self.conv_layers.apply(init_weights)
        self.fc1.apply(init_weights)
        self.fc2.apply(init_weights)

        self.lossfunction = nn.CrossEntropyLoss()
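The forward (not shown) presumably flattens the 256x4x4 conv features (consistent with 32x32 inputs and three stride-2 convs) before the fc stack; the ordering of bn and dropout below is an assumption:

    def forward(self, x):
        x = self.conv_layers(x)
        x = x.view(x.size(0), -1)            # 256 * 4 * 4
        x = self.drop(self.bn(self.fc1(x)))
        return self.fc2(x)                   # weight-normalized logits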
Example #8
    def __init__(self, cfg, model_cfg, num_classes, classifier_type, **kwargs):
        super().__init__()
        self.backbone = build_backbone(
            model_cfg.BACKBONE.NAME,
            verbose=cfg.VERBOSE,
            pretrained=model_cfg.BACKBONE.PRETRAINED,
            **kwargs)
        fdim = self.backbone.out_features

        self.head = None
        if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
            self.head = build_head(model_cfg.HEAD.NAME,
                                   verbose=cfg.VERBOSE,
                                   in_features=fdim,
                                   hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
                                   activation=model_cfg.HEAD.ACTIVATION,
                                   bn=model_cfg.HEAD.BN,
                                   dropout=model_cfg.HEAD.DROPOUT,
                                   **kwargs)
            fdim = self.head.out_features

        self.classifier = None
        if num_classes > 0:
            if classifier_type == 'linear':
                self.classifier = nn.Linear(fdim, num_classes)
            elif classifier_type == 'weight':
                self.classifier = weightNorm(nn.Linear(fdim, num_classes))

        self._fdim = fdim
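A sketch of how such a wrapper is typically used; the fdim property is implied by self._fdim, while the forward is an assumption:

    @property
    def fdim(self):
        return self._fdim

    def forward(self, x, return_feature=False):
        f = self.backbone(x)
        if self.head is not None:
            f = self.head(f)
        if self.classifier is None:
            return f
        y = self.classifier(f)
        return (y, f) if return_feature else y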
Example #9
def conv3x3(in_planes, out_planes, stride=1):
    return weightNorm(
        nn.Conv2d(in_planes,
                  out_planes,
                  kernel_size=3,
                  stride=stride,
                  padding=1,
                  bias=True))
Example #10
 def __init__(self, class_num, bottleneck_dim=256, type="linear"):
     super(feat_classifier, self).__init__()
     if type == "linear":
         self.fc = nn.Linear(bottleneck_dim, class_num)
     else:
         self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num),
                              name="weight")
     self.fc.apply(init_weights)
Example #11
 def __init__(self, class_num, bottleneck_dim=256, type="linear"):
     super(feat_classifier, self).__init__()
     if type == "linear":
         self.fc = nn.Linear(bottleneck_dim, class_num)
     else:
         # this weightNorm is the WeightNorm mentioned (alongside BatchNorm) at the end of the paper's discussion; surprisingly easy to implement
         self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num),
                              name="weight")
     self.fc.apply(init_weights)
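For reference, wrapping a layer this way leaves its forward computation unchanged but splits the weight into a magnitude parameter and a direction parameter; a quick check (dimensions hypothetical):

    fc = weightNorm(nn.Linear(256, 10), name="weight")
    print(sorted(n for n, _ in fc.named_parameters()))
    # ['bias', 'weight_g', 'weight_v']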
Example #12
    def __init__(self, num_inputs, depth, num_outputs):
        super(ResNet_wobn, self).__init__()
        self.in_planes = 64

        block, num_blocks = cfg(depth)
        self.conv0 = conv3x3(num_inputs, 32, 2)  # 64
        self.layer1 = self._make_layer(block, 64, num_blocks[0],
                                       stride=2)  # 32
        self.layer2 = self._make_layer(block, 128, num_blocks[1],
                                       stride=2)  # 16
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=1)
        self.conv4 = weightNorm(nn.Conv2d(512, 1, 1, 1, 0))
        self.relu_1 = TReLU()
        self.conv1 = weightNorm(nn.Conv2d(65 + 2, 64, 1, 1, 0))
        self.conv2 = weightNorm(nn.Conv2d(64, 64, 1, 1, 0))
        self.conv3 = weightNorm(nn.Conv2d(64, 32, 1, 1, 0))
        self.relu_2 = TReLU()
        self.relu_3 = TReLU()
        self.relu_4 = TReLU()
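cfg and _make_layer are not shown; a conventional ResNet-style _make_layer, sketched under the usual assumptions (block exposes an expansion attribute):

    def _make_layer(self, block, planes, num_blocks, stride):
        # The first block may downsample; the rest run at stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)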
Example #13
 def __init__(self, class_num, bottleneck_dim=256, type="linear"):
     super(feat_classifier, self).__init__()
     self.type = type
     if type == 'wn':
         self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num), name="weight")
         self.fc.apply(init_weights)
     elif type == 'linear':
         self.fc = nn.Linear(bottleneck_dim, class_num)
         self.fc.apply(init_weights)
     else:
         self.fc = nn.Linear(bottleneck_dim, class_num, bias=False)
         nn.init.xavier_normal_(self.fc.weight)
Example #14
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.conv2 = conv3x3(planes, planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                weightNorm(
                    nn.Conv2d(in_planes,
                              self.expansion * planes,
                              kernel_size=1,
                              stride=stride,
                              bias=True)), )
        self.relu_1 = TReLU()
        self.relu_2 = TReLU()
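As with the Bottleneck in Example #2, a plausible BN-free forward for this block:

    def forward(self, x):
        out = self.relu_1(self.conv1(x))
        out = self.conv2(out)
        out = out + self.shortcut(x)
        return self.relu_2(out)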
Example #15
    def __init__(self,
                 resnet_name,
                 use_bottleneck=True,
                 bottleneck_dim=256,
                 new_cls=False,
                 class_num=1000):
        super(ResNetFc, self).__init__()
        model_resnet = resnet_dict[resnet_name](pretrained=True)
        self.conv1 = model_resnet.conv1
        self.bn1 = model_resnet.bn1
        self.relu = model_resnet.relu
        self.maxpool = model_resnet.maxpool
        self.layer1 = model_resnet.layer1
        self.layer2 = model_resnet.layer2
        self.layer3 = model_resnet.layer3
        self.layer4 = model_resnet.layer4
        self.avgpool = model_resnet.avgpool
        self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu,
                                            self.maxpool, self.layer1,
                                            self.layer2, self.layer3,
                                            self.layer4, self.avgpool)

        self.use_bottleneck = use_bottleneck
        self.new_cls = new_cls
        # print("classes inside network",new_cls)
        if new_cls:
            if self.use_bottleneck:
                print(bottleneck_dim)
                self.bottleneck = nn.Linear(model_resnet.fc.in_features,
                                            bottleneck_dim)
                # self.fc = nn.Linear(bottleneck_dim, class_num)
                self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num),
                                     name="weight")
                # self.fc = nn.Linear(bottleneck_dim, class_num)
                self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True)

                self.bottleneck.apply(init_weights)
                self.fc.apply(init_weights)
                self.__in_features = bottleneck_dim
            else:
                self.fc = nn.Linear(model_resnet.fc.in_features, class_num)
                self.fc.apply(init_weights)
                self.__in_features = model_resnet.fc.in_features
        else:
            self.fc = model_resnet.fc
            self.__in_features = model_resnet.fc.in_features
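The forward (not shown) would follow the usual bottleneck-then-classifier pattern; whether bn is applied before the weight-normalized fc is an assumption in this sketch:

    def forward(self, x):
        x = self.feature_layers(x)
        x = x.view(x.size(0), -1)
        if self.use_bottleneck and self.new_cls:
            x = self.bn(self.bottleneck(x))
        y = self.fc(x)
        return x, y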
Example #16
 def __init__(self, class_num, bottleneck_dim=256, type="linear", bias=True, temp=0.1, args=None):
     super(feat_classifier, self).__init__()
     self.type = type
     if type == 'wn':
         self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num, bias=bias), name="weight")
         self.fc.apply(init_weights)
     elif type == 'angular':
         self.fc = nn.Linear(bottleneck_dim, class_num, bias=False)
         self.fc.apply(init_weights)
         self.temp = temp
     elif type == 'add_margin':
         self.fc = AddMarginProduct(bottleneck_dim, class_num, args.metric_s, args.metric_m)
     elif type == 'arc_margin':
         self.fc = ArcMarginProduct(bottleneck_dim, class_num, args.metric_s, args.metric_m, args.easy_margin)
     elif type == 'sphere':
         self.fc = SphereProduct(bottleneck_dim, class_num, args.metric_m)
     else:
         self.fc = nn.Linear(bottleneck_dim, class_num, bias=bias)
         self.fc.apply(init_weights)
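For the 'angular' branch, the stored temperature suggests a cosine classifier; a hedged sketch of that path only (the margin-product heads define their own forward):

    import torch.nn.functional as F

    def forward(self, x):
        if self.type == 'angular':
            # Cosine similarity between L2-normalized features and class
            # weights, sharpened by the temperature.
            w = F.normalize(self.fc.weight, dim=1)
            x = F.normalize(x, dim=1)
            return x.matmul(w.t()) / self.temp
        return self.fc(x)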
Example #17
 def __init__(self, num_classes=2):
     super(Classifier, self).__init__()
     self.fc = weightNorm(nn.Linear(2048, num_classes), name="weight")
     self.fc.apply(init_weights)
Example #18
    def _make_layers(self, cfg, with_bn=False):
        layers = []
        in_channels = 3
        for idx, x in enumerate(cfg):
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                if with_bn == 'dual':  # deprecated
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        DualNorm(x),
                        nn.ReLU(inplace=True),
                        DualAffine(x),
                    ]
                elif with_bn == 'bn':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        nn.BatchNorm2d(x),
                        nn.ReLU(inplace=True)
                    ]
                elif with_bn == 'bn_v2':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        nn.ReLU(inplace=True),
                        nn.BatchNorm2d(x)
                    ]
                elif with_bn == 'bn_population':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        BatchNorm2d(x),
                        nn.ReLU(inplace=True)
                    ]

                elif with_bn == 'brn':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        BatchRenorm2d(x),
                        nn.ReLU(inplace=True)
                    ]

                elif with_bn == 'constraint_bn_v2':
                    if idx == 0:
                        layers += [
                            nn.Conv2d(in_channels, x, kernel_size=3,
                                      padding=1),
                            nn.ReLU(inplace=True)
                        ]
                    else:
                        layers += [
                            Constraint_Norm2d(in_channels,
                                              pre_affine=True,
                                              post_affine=True),
                            #Constraint_Affine2d(in_channels),
                            nn.Conv2d(in_channels, x, kernel_size=3,
                                      padding=1),
                            nn.ReLU(inplace=True)
                        ]
                elif with_bn == 'constraint_bn_v3':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        Constraint_Norm2d(x, pre_affine=True,
                                          post_affine=True),
                        nn.ReLU(inplace=True)
                    ]
                elif with_bn == 'gn':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        nn.GroupNorm(32, x, affine=True),
                        nn.ReLU(inplace=True)
                    ]
                elif with_bn == 'constraint_bn_v2_no_affine':
                    if idx == 0:
                        layers += [
                            nn.Conv2d(in_channels, x, kernel_size=3,
                                      padding=1),
                            nn.ReLU(inplace=True)
                        ]
                    else:
                        layers += [
                            Constraint_Norm2d(in_channels,
                                              pre_affine=False,
                                              post_affine=False),
                            #Constraint_Affine2d(in_channels),
                            nn.Conv2d(in_channels, x, kernel_size=3,
                                      padding=1),
                            nn.ReLU(inplace=True)
                        ]

                elif with_bn == 'in':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        InstanceNorm2d(x, affine=True),
                        nn.ReLU(inplace=True),
                    ]
                elif with_bn == 'mabn':
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        MABN2d(x),
                        nn.ReLU(inplace=True)
                    ]
                elif with_bn == 'wn':
                    layers += [
                        weightNorm(nn.Conv2d(in_channels,
                                             x,
                                             kernel_size=3,
                                             padding=1,
                                             bias=True),
                                   name="weight"),
                        nn.ReLU(inplace=True)
                    ]
                elif with_bn == 'mabn_cen':
                    layers += [
                        Conv_Cen2d(in_channels, x, kernel_size=3, padding=1),
                        MABN2d(x),
                        nn.ReLU(inplace=True),
                    ]

                else:
                    layers += [
                        nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                        nn.ReLU(inplace=True)
                    ]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
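A usage sketch for the weight-norm branch, with a hypothetical VGG-style cfg (numbers are output channels, 'M' is max-pooling):

    # Inside the model's __init__; cfg here is illustrative, not from the source.
    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M']
    self.features = self._make_layers(cfg, with_bn='wn')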
Example #19
    def train(self):
        torch.multiprocessing.set_sharing_strategy('file_system')
        args = arg_parser()
        logger = log()
        model_root = './model_source'
        if not os.path.exists(model_root):
            os.mkdir(model_root)
        time_stamp_launch = time.strftime('%Y%m%d') + '-' + time.strftime(
            '%H%M')
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        n_gpus = len(args.gpu.split(','))

        # set parameters
        path = args.data_root
        label_file = args.label_file
        batch_size = args.batchsize
        epochs = args.max_epoch
        best_acc = 0

        dataset_name = path.split('/')[-2]

        logger.info(
            path.split('/')[-2] + '_' + time_stamp_launch +
            ' model: resnet101  lr: %s' % args.lr)
        logger.info('dataset is: ' + dataset_name)

        net = resnet101(pretrained=True)
        input_dim = net.fc.in_features
        net.fc = weightNorm(nn.Linear(input_dim, 12), name="weight")
        net = net.cuda()

        param_group = []
        for k, v in net.named_parameters():
            if k[:2] == 'fc':
                param_group += [{'params': v, 'lr': args.lr}]
            else:
                param_group += [{'params': v, 'lr': args.lr * 0.1}]

        loss = CrossEntropyLabelSmooth(num_classes=12).cuda()

        optimizer = optim.SGD(param_group, momentum=0.9, weight_decay=5e-4)
        scheduler = MultiStepLR(optimizer,
                                milestones=args.MultiStepLR,
                                gamma=0.1)

        # training dataset
        transform_train = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225)),  # ImageNet mean/std
        ])
        # train_dataset = AsoctDataset(path, label_file, args.imgs_per_volume, train=True, transform=transform_train)
        train_dataset = visDataset(path,
                                   label_file,
                                   train=True,
                                   transform=transform_train)
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=2 * n_gpus if n_gpus <= 2 else 2)

        transform_test = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225)),  # ImageNet mean/std
        ])
        val_dataset = visDataset(path,
                                 label_file,
                                 train=False,
                                 transform=transform_test)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=2 *
                                                 n_gpus if n_gpus <= 2 else 2)

        for i in range(epochs):
            accnum = 0.0
            total = 0.0
            running_loss = []
            net.train()

            for j, (img_data, img_label, ind) in enumerate(train_loader):
                img_data = img_data.cuda()
                img_label = img_label.cuda()
                r_loss, correct_num, bs_num = self.train_process(
                    net, optimizer, img_data, img_label, loss)
                running_loss += [r_loss]
                total += bs_num
                accnum += correct_num

            scheduler.step()
            avg_loss = np.mean(running_loss)
            temp_acc = 100 * float(accnum) / float(total)
            logger.info("Epoch %d running_loss=%.3f" % (i + 1, avg_loss))
            logger.info(
                "Accuracy of the prediction on the train dataset : %f %%" %
                (temp_acc))

            # evaluate the model
            acc = val_source(net, val_loader)
            if acc >= best_acc:
                logger.info('saving the best model!')
                torch.save(
                    net, './model_source/' + time_stamp_launch + '-' +
                    dataset_name + '9_1_resnet50_best.pkl')
                best_acc = acc
            else:
                torch.save(
                    net, './model_source/' + time_stamp_launch + '-' +
                    dataset_name + '9_1_resnet50_last.pkl')

            logger.info('best acc is : %.04f, acc is : %.04f' %
                        (best_acc, acc))
            logger.info('================================================')

        logger.info("Finished  Training")
Example #20
    def train(self):
        torch.multiprocessing.set_sharing_strategy('file_system')
        args = arg_parser()
        time_stamp_launch = time.strftime('%Y%m%d') + '-' + time.strftime('%H%M')
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        n_gpus = len(args.gpu.split(','))

        # set parameters
        path = args.data_path
        label_file = args.label_file

        batch_size = args.batchsize
        epochs = args.max_epoch
        best_acc = 0

        print(
            'visda_9_1_split_wn_label_smooth_synthesis_with_argument' +
            time_stamp_launch + ' model: resnet101  lr: %s' % args.lr)

        net = resnet101(pretrained=True)
        input_dim = net.fc.in_features
        net.fc = weightNorm(nn.Linear(input_dim, 12), name="weight")

        # for the visualization
        net.vis_fc = nn.Linear(2048, 3).cuda()
        net.class_fc = nn.Linear(3, 12).cuda()

        net = net.cuda()

        param_group = []
        for k, v in net.named_parameters():
            if k[:2] == 'fc':
                param_group += [{'params': v, 'lr': args.lr}]
            else:
                param_group += [{'params': v, 'lr': args.lr * 0.1}]

        loss = CrossEntropyLabelSmooth(num_classes=12).cuda()

        optimizer = optim.SGD(param_group, momentum=0.9, weight_decay=5e-4)
        scheduler = MultiStepLR(optimizer, milestones=args.MultiStepLR, gamma=0.1)

        # setting1
        transform_train_2 = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),  # ImageNet mean/std
        ])

        train_dataset = visDataset(path, label_file, train=True, transform=transform_train_2)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                                   num_workers=2 * n_gpus if n_gpus <= 2 else 2)
        # setting1
        transform_test = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),  # ImageNet mean/std
        ])

        val_dataset = visDataset(path, label_file, train=False, transform=transform_test)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
                                                 num_workers=2 * n_gpus if n_gpus <= 2 else 2)

        for i in range(epochs):
            accnum = 0.0
            accnum_vis = 0.0
            total = 0.0
            running_loss = []
            net.train()

            for j, (img_data, img_label, ind) in enumerate(train_loader):
                img_data = img_data.cuda()
                img_label = img_label.cuda()
                r_loss, correct_num, bs_num, correct_num_vis = self.train_half(net, optimizer, img_data, img_label,
                                                                               loss)
                running_loss += [r_loss]
                total += bs_num
                accnum += correct_num
                accnum_vis += correct_num_vis

            scheduler.step()

            # evaluation
            acc = val_model(net, val_loader)
            if acc >= best_acc:
                print('save the best model.')
                torch.save(net, './model_source/' + time_stamp_launch + '-visDA_9_1_resnet50_best.pkl')
                best_acc = acc
            else:
                torch.save(net, './model_source/' + time_stamp_launch + '-visDA_9_1_resnet50_last.pkl')


        print("Finished training the source model.")