Code example #1
    def _init_models(self):
        # -----------------Content_Encoder-------------------
        self.Content_Encoder = Baseline(self.cfg.DATASETS.NUM_CLASSES_S, 1, self.cfg.MODEL.PRETRAIN_PATH, 'bnneck',
                                        'after', self.cfg.MODEL.NAME, 'imagenet')
        # -----------------Criterion----------------- #
        self.xent = CrossEntropyLabelSmooth(num_classes=self.cfg.DATASETS.NUM_CLASSES_S).cuda()
        self.triplet = TripletLoss(0.3)
        self.Smooth_L1_loss = torch.nn.SmoothL1Loss(reduction='mean').cuda()
        # --------------------Cuda------------------- #
        self.Content_Encoder = torch.nn.DataParallel(self.Content_Encoder).cuda()
Code example #2
def main():
    model = Baseline(num_classes=702,
                     last_stride=1,
                     model_path=' ',
                     stn='no',
                     model_name='resnet50_ibn_a',
                     pretrain_choice=' ')
    model.load_param('models/resnet50_ibn_a/duke_resnet50_ibn_a_model.pth')
    model.to(device)
    model.eval()

    feats = []
    with torch.no_grad():
        img1 = process_img(
            '/home/zzg/Datasets/DukeReiD/DukeMTMC-reID/query/0033_c1_f0057706.jpg'
        )
        feat1 = model(img1)
        feats.append(feat1)

        img2 = process_img(
            '/home/zzg/Datasets/DukeReiD/DukeMTMC-reID/query/0033_c6_f0045755.jpg'
        )
        feat2 = model(img2)
        feats.append(feat2)

        img3 = process_img(
            '/home/zzg/Datasets/DukeReiD/DukeMTMC-reID/query/0034_c2_f0057453.jpg'
        )
        feat3 = model(img3)
        feats.append(feat3)
    feats = torch.cat(feats, dim=0)
    feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    dist = euclidean_dist_rank(feats, feats)
    print(dist)
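
This example (and the nearly identical one that follows) relies on a `process_img` helper and a `euclidean_dist_rank` function that are not shown; from its usage, `euclidean_dist_rank` returns the pairwise Euclidean distance matrix of the L2-normalized features. A minimal sketch of what `process_img` might look like is below; the 256x128 input size, the ImageNet normalization statistics, and the `device` definition are assumptions, not taken from the source:

import torch
from PIL import Image
import torchvision.transforms as T

# Assumed device handle, mirroring the undefined `device` used in the snippets above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Assumed test-time preprocessing: resize to 256x128 and normalize with ImageNet statistics.
_test_transform = T.Compose([
    T.Resize((256, 128)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


def process_img(img_path):
    # Load one image, apply the transform, and add a batch dimension so the model
    # receives a (1, 3, 256, 128) tensor on the same device as the model.
    img = Image.open(img_path).convert('RGB')
    img = _test_transform(img)
    return img.unsqueeze(0).to(device)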
Code example #3
def main():
    model = Baseline(num_classes=702,
                     last_stride=1,
                     model_path=' ',
                     stn='no',
                     model_name='resnet50_ibn_a',
                     pretrain_choice=' ')
    model.load_param('models/resnet50_ibn_a/duke_resnet50_ibn_a_model.pth')
    model.to(device)
    model.eval()

    feats = []
    with torch.no_grad():
        img1 = process_img(
            '/nfs4/ajaym/Downloads/Ranked_Person_ReID-master/demo_data/1.jpg')
        feat1 = model(img1)
        feats.append(feat1)

        img2 = process_img(
            '/nfs4/ajaym/Downloads/Ranked_Person_ReID-master/demo_data/2.jpg')
        feat2 = model(img2)
        feats.append(feat2)

        img3 = process_img(
            '/nfs4/ajaym/Downloads/Ranked_Person_ReID-master/demo_data/3.jpg')
        feat3 = model(img3)
        feats.append(feat3)
    feats = torch.cat(feats, dim=0)
    feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    dist = euclidean_dist_rank(feats, feats)
    print(dist)
Code example #4
def train(cfg):
    # prepare dataset: training set, validation set, number of query images, number of person identity classes
    train_loader, val_loader, num_query, num_classes = get_data(cfg)

    # prepare model
    model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE,
                     cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NAME,
                     cfg.MODEL.PRETRAIN_CHOICE, cfg.MODEL.BREACH)
    print('Train with loss type:',
          cfg.MODEL.METRIC_LOSS_TYPE)  # the loss function is ranked_loss
    optimizer = make_optimizer(cfg, model)
    loss_func = make_loss(cfg, num_classes)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'self':
        start_epoch = int(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        print('Start epoch:', start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'optimizer')
        print('Path to the checkpoint of optimizer:', path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        print('Only support pretrain_choice for imagenet and self, but got {}'.
              format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             loss_func, num_query, start_epoch)
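
The `train(cfg)` function above expects a yacs-style configuration object (`cfg.MODEL.*`, `cfg.SOLVER.*`). A hypothetical command-line entry point that wires a YAML file into it might look like the sketch below; the `config` module and the option names are assumptions, not taken from the source:

import argparse

from config import cfg  # assumed yacs CfgNode holding the default options

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ReID baseline training')
    parser.add_argument('--config_file', default='', type=str,
                        help='path to a YAML config file')
    parser.add_argument('opts', nargs=argparse.REMAINDER,
                        help='KEY VALUE pairs to override config options')
    args = parser.parse_args()

    # Merge the YAML file and any command-line overrides into the default config.
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    train(cfg)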
Code example #5
    def _init_models(self):
        self.Content_Encoder = Baseline(
            num_classes=self.cfg.DATASETS.NUM_CLASSES_S,
            last_stride=1,
            model_path=self.cfg.MODEL.PRETRAIN_PATH,
            neck='bnneck',
            neck_feat='after',
            model_name='resnet50',
            pretrain_choice='imagenet')

        self.Content_Encoder = torch.nn.DataParallel(
            self.Content_Encoder).cuda()
Code example #6
    def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        # resnet101 = torchvision.models.resnet101(pretrained=self._pretrained)
        resnet101 = Baseline(num_classes=702, last_stride=2, model_path='',
                             neck='bnneck', neck_feat='after',
                             model_name='resnet101', pretrain_choice='')
        resnet101.load_state_dict(torch.load('/data_hdd/Jaewoo/pretrained_model/r101_duke_state_dict.pth'))

        children = list(resnet101.children())
        children_split = list(children[0].children())

        features = children_split[:-1]
        num_features_out = 1024

        hidden = children_split[-1]
        num_hidden_out = 2048

        # for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
        for parameters in [feature.parameters() for i, feature in enumerate(features)]: # freezing
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out
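
Assuming the first child of `Baseline` is the ResNet-101 backbone and its last child is `layer4`, the split above keeps everything up to `layer3` in `features` (1024 output channels) and uses `layer4` as `hidden` (2048 output channels). A hypothetical shape check with a dummy input is sketched below; the instance name `backbone` and the 256x128 input size are assumptions:

import torch

# `backbone` is a hypothetical instance of the class that defines features() above.
features, hidden, num_features_out, num_hidden_out = backbone.features()
features.eval()
hidden.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 256, 128)    # dummy person-crop batch
    f = features(x)                    # layer3 output, roughly (1, 1024, 16, 8)
    h = hidden(f)                      # layer4 output, roughly (1, 2048, 8, 4)

assert f.shape[1] == num_features_out and h.shape[1] == num_hidden_out
print(f.shape, h.shape)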
Code example #7
class BaseModel(object):
    def __init__(self, cfg):
        self.cfg = cfg
        self._init_models()
        self._init_optimizers()

        print('---------- Networks initialized -------------')
        print_network(self.Content_Encoder)
        print('-----------------------------------------------')

    def _init_models(self):
        # -----------------Content_Encoder-------------------
        self.Content_Encoder = Baseline(self.cfg.DATASETS.NUM_CLASSES_S, 1, self.cfg.MODEL.PRETRAIN_PATH, 'bnneck',
                                      'after', self.cfg.MODEL.NAME, 'imagenet')
        # -----------------Criterion----------------- #
        self.xent = CrossEntropyLabelSmooth(num_classes=self.cfg.DATASETS.NUM_CLASSES_S).cuda()
        self.triplet = TripletLoss(0.3)
        self.Smooth_L1_loss = torch.nn.SmoothL1Loss(reduction='mean').cuda()
        # --------------------Cuda------------------- #
        self.Content_Encoder = torch.nn.DataParallel(self.Content_Encoder).cuda()

    def _init_optimizers(self):
        self.Content_optimizer = make_optimizer(self.cfg, self.Content_Encoder)
        self.Content_optimizer_fix = make_optimizer(self.cfg, self.Content_Encoder, fix=True)
        self.scheduler = WarmupMultiStepLR(self.Content_optimizer, (30, 55), 0.1, 1.0 / 3,
                                           500, "linear")
        self.scheduler_fix = WarmupMultiStepLR(self.Content_optimizer_fix, (30, 55), 0.1, 1.0 / 3,
                                               500, "linear")
        self.schedulers = []
        self.optimizers = []

    def reset_model_status(self):
        self.Content_Encoder.train()

    def two_classifier(self, epoch, train_loader_s, train_loader_t, writer, logger, rand_src_1, rand_src_2,
                       print_freq=1):
        self.reset_model_status()
        self.epoch = epoch
        self.scheduler.step(epoch)
        self.scheduler_fix.step(epoch)
        target_iter = iter(train_loader_t)
        batch_time = AverageMeter()
        data_time = AverageMeter()
        end = time.time()

        if (epoch < 80) or (110 <= epoch < 170):
            mode = 'normal_c1_c2'
        elif (80 <= epoch < 110) or (170 <= epoch < 210):
            mode = 'reverse_c1_c2'
        elif 210 <= epoch:
            mode = 'fix_c1_c2'
        for i, inputs in enumerate(train_loader_s):
            data_time.update(time.time() - end)
            try:
                inputs_target = next(target_iter)
            except StopIteration:
                target_iter = iter(train_loader_t)
                inputs_target = next(target_iter)
            img_s, pid_s, camid_s = self._parse_data(inputs)
            img_t, pid_t, camid_t = self._parse_data(inputs_target)
            content_code_s, content_feat_s = self.Content_Encoder(img_s)
            pid_s_12 = np.asarray(pid_s.cpu())
            camid_s = np.asarray(camid_s.cpu())
            idx = []
            for c_id in rand_src_1:
                if len(np.where(c_id == camid_s)[0]) == 0:
                    continue
                else:
                    idx.append(np.where(c_id == camid_s)[0])
            if idx == [] or len(idx[0]) == 1:
                idx = [np.asarray([a]) for a in range(self.cfg.SOLVER.IMS_PER_BATCH)]
            idx = np.concatenate(idx)
            pid_1 = torch.tensor(pid_s_12[idx]).cuda()
            feat_1 = content_feat_s[idx]
            idx = []
            for c_id in rand_src_2:
                if len(np.where(c_id == camid_s)[0]) == 0:
                    continue
                else:
                    idx.append(np.where(c_id == camid_s)[0])
            if idx == [] or len(idx[0]) == 1:
                idx = [np.asarray([a]) for a in range(self.cfg.SOLVER.IMS_PER_BATCH)]
            idx = np.concatenate(idx)
            pid_2 = torch.tensor(pid_s_12[idx]).cuda()
            feat_2 = content_feat_s[idx]
            if mode == 'normal_c1_c2':
                class_1 = self.Content_Encoder(feat_1, mode='c1')
                class_2 = self.Content_Encoder(feat_2, mode='c2')
                ID_loss_1 = self.xent(class_1, pid_1)
                ID_loss_2 = self.xent(class_2, pid_2)
                ID_tri_loss = self.triplet(content_feat_s, pid_s)
                total_loss = ID_loss_1 + ID_loss_2 + ID_tri_loss[0]
                self.Content_optimizer.zero_grad()
                total_loss.backward()
                self.Content_optimizer.step()
                batch_time.update(time.time() - end)
                end = time.time()
                if (i + 1) % print_freq == 0:
                    logger.info('Epoch: [{}][{}/{}]\t'
                                'Time {:.3f} ({:.3f})\t'
                                'Data {:.3f} ({:.3f})\t'
                                'ID_loss: {:.3f}  ID_loss_1: {:.3f}  ID_loss_2: {:.3f}   tri_loss: {:.3f} '
                                .format(epoch, i + 1, len(train_loader_s),
                                        batch_time.val, batch_time.avg,
                                        data_time.val, data_time.avg,
                                        total_loss.item(), ID_loss_1.item(), ID_loss_2.item(), ID_tri_loss[0].item()
                                        ))
            elif mode == 'reverse_c1_c2':
                class_1 = self.Content_Encoder(feat_1, mode='c2')
                class_2 = self.Content_Encoder(feat_2, mode='c1')
                ID_loss_1 = self.xent(class_1, pid_1)
                ID_loss_2 = self.xent(class_2, pid_2)
                ID_tri_loss = self.triplet(content_feat_s, pid_s)
                total_loss = ID_loss_1 + ID_loss_2 + ID_tri_loss[0]
                self.Content_optimizer_fix.zero_grad()
                total_loss.backward()
                self.Content_optimizer_fix.step()
                batch_time.update(time.time() - end)
                end = time.time()
                if (i + 1) % print_freq == 0:
                    logger.info('Epoch: [{}][{}/{}]\t'
                                'Time {:.3f} ({:.3f})\t'
                                'Data {:.3f} ({:.3f})\t'
                                'ID_loss: {:.3f}  ID_loss_1: {:.3f}  ID_loss_2: {:.3f}   tri_loss: {:.3f}'
                                .format(epoch, i + 1, len(train_loader_s),
                                        batch_time.val, batch_time.avg,
                                        data_time.val, data_time.avg,
                                        total_loss.item(), ID_loss_1.item(), ID_loss_2.item(), ID_tri_loss[0].item()
                                        ))
            elif mode == 'fix_c1_c2':
                class_1 = self.Content_Encoder(feat_1, mode='c2')
                class_2 = self.Content_Encoder(feat_2, mode='c1')
                ID_loss_1 = self.xent(class_1, pid_1)
                ID_loss_2 = self.xent(class_2, pid_2)

                content_code_t, content_feat_t = self.Content_Encoder(img_t)
                tar_class_1 = self.Content_Encoder(content_feat_t, mode='c1')
                tar_class_2 = self.Content_Encoder(content_feat_t, mode='c2')
                tar_L1_loss = self.Smooth_L1_loss(tar_class_1, tar_class_2)
                ID_tri_loss = self.triplet(content_feat_s, pid_s)
                arg_c1 = torch.argmax(tar_class_1, dim=1)
                arg_c2 = torch.argmax(tar_class_2, dim=1)
                arg_idx = []
                fake_id = []
                for i_dx, data in enumerate(arg_c1):
                    if (data == arg_c2[i_dx]) and (((tar_class_1[i_dx][data] + tar_class_2[i_dx][arg_c2[i_dx]])/2) > 0.8):
                        arg_idx.append(i_dx)
                        fake_id.append(data)
                if 210 <= epoch < 220:
                    if arg_idx != []:
                        ID_loss_fake = self.xent(content_code_t[arg_idx], torch.tensor(fake_id).cuda())
                        total_loss = ID_loss_1 + ID_loss_2 + 0.5 * tar_L1_loss + ID_tri_loss[0]
                    else:
                        ID_loss_fake = torch.tensor([0])
                        total_loss = ID_loss_1 + ID_loss_2 + 0.5 * tar_L1_loss + ID_tri_loss[0]
                if 220 <= epoch:
                    if arg_idx != []:
                        ID_loss_fake = self.xent(content_code_t[arg_idx], torch.tensor(fake_id).cuda())
                        total_loss = ID_loss_1 + ID_loss_2 + 0.08 * ID_loss_fake + ID_tri_loss[0] + 0.5 * tar_L1_loss
                    else:
                        ID_loss_fake = torch.tensor([0])
                        total_loss = ID_loss_1 + ID_loss_2 + ID_tri_loss[0] + 0.5 * tar_L1_loss

                self.Content_optimizer_fix.zero_grad()
                total_loss.backward()
                self.Content_optimizer_fix.step()
                batch_time.update(time.time() - end)
                end = time.time()
                if (i + 1) % print_freq == 0:
                    logger.info('Epoch: [{}][{}/{}]\t'
                                'Time {:.3f} ({:.3f})\t'
                                'Data {:.3f} ({:.3f})\t'
                                'ID_loss: {:.3f}  ID_loss_1: {:.3f}  ID_loss_2: {:.3f}  tar_L1_loss: {:.3f}  tri_loss: {:.3f}  ID_loss_fake:  {:.6f}'
                                .format(epoch, i + 1, len(train_loader_s),
                                        batch_time.val, batch_time.avg,
                                        data_time.val, data_time.avg,
                                        total_loss.item(), ID_loss_1.item(), ID_loss_2.item(), tar_L1_loss.item(),
                                        ID_tri_loss[0].item(), ID_loss_fake.item()))

    def _parse_data(self, inputs):
        imgs, pids, camids = inputs
        inputs = imgs.cuda()
        targets = pids.cuda()
        camids = camids.cuda()
        return inputs, targets, camids
Code example #8
def list_pictures(directory):
    imgs = sorted([opj(directory, img) for img in os.listdir(directory)],
                  key=lambda x: int(x.split('/')[-1].split('.')[0]))
    return imgs


def read_image(img_path):
    img = default_loader(img_path)
    img = test_transform(img)
    img = img.unsqueeze(0)
    return img


if __name__ == '__main__':
    model_path = 'your model path'
    model = Baseline(10000, 1, model_path, 'bnneck', 'before', 'resnet50',
                     'self')

    model = model.to('cuda')
    model = nn.DataParallel(model)

    resume = torch.load(model_path)
    model.load_state_dict(resume)

    model.eval()

    print('create multiprocessing...')
    pool = multiprocessing.Pool(processes=32)
    print('after create multiprocessing...')

    query_dir = './image_query/'
    test_dir = './image_test/'
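
The script above omits its imports and the `opj`, `default_loader`, and `test_transform` helpers it uses; a plausible preamble is sketched below (the transform size and normalization statistics are assumptions, not taken from the source):

import multiprocessing
import os
from os.path import join as opj

import torch
import torch.nn as nn
import torchvision.transforms as T
from torchvision.datasets.folder import default_loader  # PIL-based image loader

# Assumed test-time preprocessing: resize to 256x128, normalize with ImageNet statistics.
test_transform = T.Compose([
    T.Resize((256, 128)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])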