Example #1
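# Build a student model and its EMA (mean-teacher) copy; the EMA copy's
# parameters are detached so they are updated outside of back-propagation.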
def create_model(args):
    model_1 = models.create(args.arch,
                            num_features=args.features,
                            dropout=args.dropout,
                            num_classes=1)

    model_1_ema = models.create(args.arch,
                                num_features=args.features,
                                dropout=args.dropout,
                                num_classes=1)

    model_1.cuda()
    model_1_ema.cuda()
    model_1 = nn.DataParallel(model_1)
    model_1_ema = nn.DataParallel(model_1_ema)

    if args.no_source:
        print('No source pre-training')
    else:
        initial_weights = load_checkpoint(args.init_1)
        copy_state_dict(initial_weights['state_dict'], model_1)
        copy_state_dict(initial_weights['state_dict'], model_1_ema)

    for param in model_1_ema.parameters():
        param.detach_()

    return model_1, model_1_ema
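
Since the EMA model's parameters are detached, they receive no gradients and must be updated manually. A minimal sketch of the usual mean-teacher momentum update, assuming the trainer performs this step internally and that alpha is the momentum hyper-parameter:

def update_ema_variables(model, model_ema, alpha=0.999):
    # The teacher's weights track an exponential moving average of the
    # student's weights instead of being trained by back-propagation.
    for param, param_ema in zip(model.parameters(), model_ema.parameters()):
        param_ema.data.mul_(alpha).add_(param.data, alpha=1 - alpha)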
Example #2
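# Rebuild a model whose classifier structure (CosineLinear or SplitCosineLinear)
# matches the saved checkpoint so the weights can be loaded, then return
# current, EMA, and reference copies.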
def create_model(args, classes, phase):
    '''
    Create a model with the same structure as the saved model so that the
    saved weights can be loaded.
    :param args:
    :param classes:
    :param phase:
    :return:
    '''
    model_ema = models.create(args.arch,
                              num_features=args.features,
                              dropout=args.dropout,
                              num_classes=classes)
    initial_weights = load_checkpoint(
        osp.join(args.init, 'phase{}_model_best.pth.tar'.format(phase)))
    if 'module.classifier.fc1.weight' in initial_weights['state_dict'].keys():
        in_features = initial_weights['state_dict'][
            'module.classifier.fc1.weight'].data.size(1)
        out_features1 = initial_weights['state_dict'][
            'module.classifier.fc1.weight'].data.size(0)
        out_features2 = initial_weights['state_dict'][
            'module.classifier.fc2.weight'].data.size(0)
        new_fc = SplitCosineLinear(in_features,
                                   out_features1,
                                   out_features2,
                                   sigma=True)
        model_ema.classifier = new_fc
        new_fc_max = copy.deepcopy(new_fc)
        model_ema.classifier_max = new_fc_max
    else:
        in_features = initial_weights['state_dict'][
            'module.classifier.weight'].data.size(1)
        out_features = initial_weights['state_dict'][
            'module.classifier.weight'].data.size(0)
        new_fc = CosineLinear(in_features=in_features,
                              out_features=out_features,
                              sigma=True)
        model_ema.classifier = new_fc
        new_fc_max = copy.deepcopy(new_fc)
        model_ema.classifier_max = new_fc_max
    copy_state_dict(initial_weights['state_dict'], model_ema, strip='module.')
    model_cur = copy.deepcopy(model_ema)
    model_ref = copy.deepcopy(model_ema)
    model_ema.cuda()
    model_ema = nn.DataParallel(model_ema)
    model_cur.cuda()
    model_cur = nn.DataParallel(model_cur)
    model_ref.cuda()
    model_ref = nn.DataParallel(model_ref)
    for param in model_ema.parameters():
        param.detach_()
    return model_cur, model_ema, model_ref
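
The CosineLinear and SplitCosineLinear layers above are cosine-similarity classifiers in the style of LUCIR. A minimal sketch of CosineLinear under that assumption; the repository's actual definition may differ in details such as initialization:

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class CosineLinear(nn.Module):
    # Logits are scaled cosine similarities between L2-normalized
    # features and L2-normalized class weight vectors.
    def __init__(self, in_features, out_features, sigma=True):
        super(CosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.sigma = nn.Parameter(torch.Tensor(1)) if sigma else None
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.sigma is not None:
            self.sigma.data.fill_(1)

    def forward(self, x):
        out = F.linear(F.normalize(x, p=2, dim=1),
                       F.normalize(self.weight, p=2, dim=1))
        return self.sigma * out if self.sigma is not None else out

SplitCosineLinear presumably keeps two such blocks (fc1 for old classes, fc2 for new ones) under a shared sigma, which matches how fc1/fc2 are addressed above.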
Example #3
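# Class-incremental training loop: each phase clusters the new target IDs with
# DBSCAN, grows the cosine classifiers with the resulting clusters, trains the
# current/EMA/reference model triple, and finally selects exemplar prototypes.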
def main():
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True

    ########################################
    cudnn.benchmark = True
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    ######## prepare the dataset ###########
    dataset_target = get_data(args.dataset_target, args.data_dir)

    # Create the dataloader used for testing
    test_loader_target = get_test_loader(dataset_target, args.height,
                                         args.width, 256,
                                         args.workers)  #args.batch_size * 4

    # Create the class-ID order for the incremental phases
    order_name = "./checkpoint/seed_{}_{}_order.pkl".format(
        args.seed, args.dataset_target)
    print("Order name:{}".format(order_name))
    if os.path.exists(order_name):
        print("Loading orders")
        order = unpickle(order_name)
    else:
        print("Generating orders")
        order = np.arange(dataset_target.num_train_pids)
        np.random.shuffle(order)
        savepickle(order, order_name)
    order_list = list(order)
    print(order_list)

    init_class_num = dataset_target.num_train_pids - (args.num_phase -
                                                      1) * args.nb_cl

    start_phase = 1
    for phase in range(start_phase, args.num_phase):
        global best_mAP
        best_mAP = 0
        if phase > 0:
            proto_dataset = unpickle(
                osp.join(args.logs_dir, 'phase{}_proto.pkl'.format(phase - 1)))
        else:
            proto_dataset = []
        old_class_num = get_old_class_num(proto_dataset)
        print('\n\n phase {} has {} old classes'.format(phase, old_class_num))
        # Load the best model at the start of every phase.
        model_cur, model_ema, model_ref = create_model(args, 100, phase)
        evaluator_ema = Evaluator(model_ema)
        # Update the input dataset
        if phase == 0:
            iters = 200
            model_ref = None
            input_id = order_list[0:init_class_num]
            input_dataset = [(fname, pid, cam)
                             for (fname, pid, cam) in dataset_target.train
                             if pid in input_id]
            print('phase:{} input id:{},input image:{}'.format(
                phase, init_class_num, len(input_dataset)))
        else:
            iters = args.iters
            input_id = order_list[init_class_num +
                                  (phase - 1) * args.nb_cl:init_class_num +
                                  phase * args.nb_cl]
            input_dataset = [(fname, pid, cam)
                             for (fname, pid, cam) in dataset_target.train
                             if pid in input_id]
            print('phase:{} input id:{},input image:{}'.format(
                phase, args.nb_cl, len(input_dataset)))

        tar_cluster_loader = get_test_loader(dataset_target,
                                             args.height,
                                             args.width,
                                             256,
                                             workers=args.workers,
                                             testset=sorted(input_dataset))

        for epoch in range(args.epochs):
            dict_f, _, dic_logit = extract_features(model_ema,
                                                    tar_cluster_loader,
                                                    print_freq=40)
            cf = torch.stack(list(dict_f.values()))  # already L2-normalized
            agent_sim = torch.stack(list(dic_logit.values()))

            agent_sim = agent_sim[:, :old_class_num]  # keep old-class logits; dim 1 should equal old_class_num
            agent_sim = F.softmax(agent_sim, dim=1)
            agent_sim_dist = torch.cdist(agent_sim, agent_sim, p=1) / 2
            agent_sim_dist = agent_sim_dist.numpy()
            rerank_dist = compute_jaccard_dist(
                cf, use_gpu=True).numpy()  # re-ranked distances lie in [0, 1]; args.rr_gpu
            lambda_a = 0.3 if phase == 0 else 0
            total_dist = (1 - lambda_a) * rerank_dist + lambda_a * agent_sim_dist
            if (epoch == 0):
                #DBSCAN cluster
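                # eps heuristic: sort all pairwise distances (upper triangle)
                # and use the mean of the smallest rho fraction as the radius.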
                tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
                tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
                tri_mat = np.sort(tri_mat, axis=None)
                rho = 2e-3
                top_num = np.round(rho * tri_mat.size).astype(int)
                eps = tri_mat[:top_num].mean()
                print('eps for cluster: {:.3f}'.format(eps))
                cluster = DBSCAN(eps=eps,
                                 min_samples=4,
                                 metric='precomputed',
                                 n_jobs=-1)
            print('Clustering and labeling...')
            labels = cluster.fit_predict(total_dist)
            num_ids = len(set(labels)) - (1 if -1 in labels else 0)
            args.num_clusters = num_ids
            print('\n Clustered into {} classes \n'.format(args.num_clusters))

            # generate new dataset and calculate cluster centers
            new_dataset = []
            cluster_centers = collections.defaultdict(list)
            for i, ((fname, _, cid),
                    label) in enumerate(zip(sorted(input_dataset), labels)):
                if label == -1:
                    continue
                new_dataset.append((fname, label + old_class_num, cid))
                cluster_centers[label].append(cf[i])

            cluster_centers = [
                torch.stack(cluster_centers[idx]).mean(0)
                for idx in sorted(cluster_centers.keys())
            ]
            cluster_centers = torch.stack(cluster_centers)
            cluster_centers_avg = F.normalize(cluster_centers[:, :2048],
                                              dim=1).float().cuda()
            cluster_centers_max = F.normalize(cluster_centers[:, 2048:],
                                              dim=1).float().cuda()
            # The num_ids new classes require new CosineLinear classifier weights;
            # the new_fc layers of model_cur and model_ema are refreshed every epoch.
            in_features = model_ema.module.classifier.in_features
            if phase == 0:
                out_features = args.num_clusters
                # Create new_fc_avg and assign it to the avg classifier of model_cur and model_ema.
                new_fc_avg = CosineLinear(in_features=in_features,
                                          out_features=out_features,
                                          sigma=True).cuda()
                print("in_features:", in_features, "out_features:",
                      out_features)
                # Evaluator
                new_fc_avg.weight.data.copy_(cluster_centers_avg)
                #new_fc.sigma.data = (model_ema.module.classifier.sigma.data+model_ema.module.classifier_max.sigma.data)/2  # sigma is treated as 20 later
                model_ema.module.classifier = new_fc_avg
                model_cur.module.classifier = copy.deepcopy(new_fc_avg)

                # Create new_fc_max and assign it to classifier_max of model_cur and model_ema.
                new_fc_max = CosineLinear(in_features=in_features,
                                          out_features=out_features,
                                          sigma=True).cuda()
                new_fc_max.weight.data.copy_(cluster_centers_max)
                model_ema.module.classifier_max = new_fc_max
                model_cur.module.classifier_max = copy.deepcopy(new_fc_max)
                cur_lamda = 0

            elif phase == 1:
                ############################################################
                # incremental classes
                if epoch == 0:
                    out_features = model_ema.module.classifier.out_features
                    print("in phase 1:epoch 0 : in_features:", in_features,
                          "out_features:", out_features)

                    new_fc_cur = SplitCosineLinear(in_features, out_features,
                                                   args.num_clusters).cuda()
                    new_fc_cur.fc1.weight.data = model_cur.module.classifier.weight.data  # use id() to check whether they share the same storage
                    new_fc_cur.fc2.weight.data.copy_(cluster_centers_avg)
                    model_cur.module.classifier = new_fc_cur

                    new_fc_cur_max = SplitCosineLinear(
                        in_features, out_features, args.num_clusters).cuda()
                    new_fc_cur_max.fc1.weight.data = model_cur.module.classifier_max.weight.data
                    new_fc_cur_max.fc2.weight.data.copy_(cluster_centers_max)
                    model_cur.module.classifier_max = new_fc_cur_max

                    new_fc_ema = SplitCosineLinear(in_features, out_features,
                                                   args.num_clusters).cuda()
                    new_fc_ema.fc1.weight.data = model_ema.module.classifier.weight.data
                    new_fc_ema.fc2.weight.data.copy_(cluster_centers_avg)
                    model_ema.module.classifier = new_fc_ema

                    new_fc_ema_max = SplitCosineLinear(
                        in_features, out_features, args.num_clusters).cuda()
                    new_fc_ema_max.fc1.weight.data = model_ema.module.classifier_max.weight.data
                    new_fc_ema_max.fc2.weight.data.copy_(cluster_centers_max)
                    model_ema.module.classifier_max = new_fc_ema_max

                else:
                    out_features = model_ema.module.classifier.fc1.out_features
                    new_ema_fc = CosineLinear(in_features,
                                              args.num_clusters,
                                              sigma=False).cuda()
                    new_ema_fc.weight.data.copy_(cluster_centers_avg)
                    model_ema.module.classifier.fc2 = new_ema_fc

                    new_fc_cur = copy.deepcopy(new_ema_fc)
                    model_cur.module.classifier.fc2 = new_fc_cur

                    new_ema_fc_max = CosineLinear(in_features,
                                                  args.num_clusters,
                                                  sigma=False).cuda()
                    new_ema_fc_max.weight.data.copy_(cluster_centers_max)
                    model_ema.module.classifier_max.fc2 = new_ema_fc_max

                    new_fc_cur_max = copy.deepcopy(new_ema_fc_max)
                    model_cur.module.classifier_max.fc2 = new_fc_cur_max
                lamda_mult = out_features * 1.0 / args.num_clusters  # class_old / class_new
                cur_lamda = args.lamda * math.sqrt(lamda_mult)
                print("###############################")
                print("Lamda for less forget is set to ", cur_lamda)
                print("###############################")
                assert model_ema.module.classifier.fc1.weight.data.size(
                    0) == old_class_num
                assert model_ema.module.classifier.fc2.weight.data.size(
                    0) == args.num_clusters
            else:
                if epoch == 0:
                    out_features1 = model_ema.module.classifier.fc1.out_features
                    out_features2 = model_ema.module.classifier.fc2.out_features
                    out_features = out_features1 + out_features2
                    print("in_features:", in_features, "out_features1:", \
                          out_features1, "out_features2:", out_features2)

                    new_fc_cur = SplitCosineLinear(in_features, out_features,
                                                   args.num_clusters).cuda()
                    new_fc_cur.fc1.weight.data[:out_features1].copy_(
                        model_cur.module.classifier.fc1.weight.data)
                    new_fc_cur.fc1.weight.data[out_features1:].copy_(
                        model_cur.module.classifier.fc2.weight.data)
                    new_fc_cur.fc2.weight.data.copy_(cluster_centers_avg)
                    #new_fc_cur.sigma.data = model_cur.module.classifier.sigma.data
                    model_cur.module.classifier = new_fc_cur

                    new_fc_cur_max = SplitCosineLinear(
                        in_features, out_features, args.num_clusters).cuda()
                    new_fc_cur_max.fc1.weight.data[:out_features1].copy_(
                        model_cur.module.classifier_max.fc1.weight.data)
                    new_fc_cur_max.fc1.weight.data[out_features1:].copy_(
                        model_cur.module.classifier_max.fc2.weight.data)
                    new_fc_cur_max.fc2.weight.data.copy_(cluster_centers_max)
                    #new_fc_cur_max.sigma.data = model_cur.module.classifier_max.sigma.data
                    model_cur.module.classifier_max = new_fc_cur_max

                    new_fc_ema = SplitCosineLinear(in_features, out_features,
                                                   args.num_clusters).cuda()
                    new_fc_ema.fc1.weight.data[:out_features1].copy_(
                        model_ema.module.classifier.fc1.weight.data)
                    new_fc_ema.fc1.weight.data[out_features1:].copy_(
                        model_ema.module.classifier.fc2.weight.data)
                    new_fc_ema.fc2.weight.data.copy_(cluster_centers_avg)
                    #new_fc_ema.sigma.data = model_ema.module.classifier.sigma.data
                    model_ema.module.classifier = new_fc_ema

                    new_fc_ema_max = SplitCosineLinear(
                        in_features, out_features, args.num_clusters).cuda()
                    new_fc_ema_max.fc1.weight.data[:out_features1].copy_(
                        model_ema.module.classifier_max.fc1.weight.data)
                    new_fc_ema_max.fc1.weight.data[out_features1:].copy_(
                        model_ema.module.classifier_max.fc2.weight.data)
                    new_fc_ema_max.fc2.weight.data.copy_(cluster_centers_max)
                    #new_fc_ema_max.sigma.data  = model_ema.module.classifier_max.sigma.data
                    model_ema.module.classifier_max = new_fc_ema_max

                else:
                    out_features = model_ema.module.classifier.fc1.out_features

                    new_ema_fc = CosineLinear(in_features,
                                              args.num_clusters,
                                              sigma=False).cuda()
                    new_ema_fc.weight.data.copy_(cluster_centers_avg)
                    model_ema.module.classifier.fc2 = new_ema_fc

                    new_fc_cur = copy.deepcopy(new_ema_fc)
                    model_cur.module.classifier.fc2 = new_fc_cur

                    new_ema_fc_max = CosineLinear(in_features,
                                                  args.num_clusters,
                                                  sigma=False).cuda()
                    new_ema_fc_max.weight.data.copy_(cluster_centers_max)
                    model_ema.module.classifier_max.fc2 = new_ema_fc_max
                    new_fc_cur_max = copy.deepcopy(new_ema_fc_max)
                    model_cur.module.classifier_max.fc2 = new_fc_cur_max
                lamda_mult = (out_features) * 1.0 / (args.nb_cl)
                cur_lamda = args.lamda * math.sqrt(lamda_mult)
                print("###############################")
                print("Lamda for less forget is set to ", cur_lamda)
                print("###############################")
                assert model_ema.module.classifier.fc1.weight.data.size(
                    0) == old_class_num
                assert model_ema.module.classifier.fc2.weight.data.size(
                    0) == args.num_clusters

            # Build the corresponding train loaders and the optimizer.
            params = []
            for key, value in model_cur.named_parameters():
                if not value.requires_grad:
                    continue
                if key == 'module.classifier.sigma' or key == 'module.classifier_max.sigma':  # the temperature sigma needs a larger weight_decay; the cosine loss uses weight decay = 0.1
                    params += [{
                        "params": [value],
                        "lr": args.lr,
                        "weight_decay": args.weight_decay * 100
                    }]
                    print('key: {} ,weight decay : {}, value : {}'.format(
                        key, args.weight_decay * 100, value))
                elif key == 'module.classifier.fc1.weight' or key == 'module.classifier_max.fc1.weight':
                    params += [{
                        "params": [value],
                        "lr": args.lr,
                        "weight_decay": args.weight_decay
                    }]  # weight decay changed from 0 to 3.5e-4
                    print('lr of {} is {}'.format(key, args.lr))
                else:
                    params += [{
                        "params": [value],
                        "lr": args.lr,
                        "weight_decay": args.weight_decay
                    }]
            optimizer = torch.optim.Adam(params)

            # Set up the dataloader and dataset

            if phase > 0:
                example_dataset = new_dataset + proto_dataset
            else:
                example_dataset = new_dataset  # no exemplars yet at phase 0

            train_loader_examplar = get_train_loader(dataset_target,
                                                     args.height,
                                                     args.width,
                                                     args.batch_size,
                                                     args.workers,
                                                     args.num_instances,
                                                     iters,
                                                     trainset=example_dataset)
            train_loader_target = get_train_loader(dataset_target,
                                                   args.height,
                                                   args.width,
                                                   args.batch_size,
                                                   args.workers,
                                                   args.num_instances,
                                                   iters,
                                                   trainset=new_dataset)

            # Trainer
            trainer = UsicTrainer_E(model_cur=model_cur,
                                    model_ema=model_ema,
                                    model_ref=model_ref,
                                    old_class_num=old_class_num,
                                    new_class_num=args.num_clusters,
                                    alpha=args.alpha)

            train_loader_target.new_epoch()
            train_loader_examplar.new_epoch()  # for uniformity; it could instead be built outside and left unused in train()
            trainer.train(phase,
                          epoch,
                          train_loader_target,
                          train_loader_examplar,
                          optimizer,
                          cur_lamda,
                          ce_soft_weight=args.soft_ce_weight,
                          print_freq=args.print_freq,
                          train_iters=iters)

            def save_model(model_ema, is_best, best_mAP, phase):
                save_checkpoint(
                    {
                        'state_dict': model_ema.state_dict(),
                        'phase': phase,
                        'epoch': epoch + 1,
                        'best_mAP': best_mAP,
                    },
                    is_best,
                    fpath=osp.join(
                        args.logs_dir,
                        'phase{}_model_checkpoint.pth.tar'.format(phase)))

            if ((epoch + 1) % args.eval_step == 0
                    or (epoch == args.epochs - 1)):
                mAP_1 = evaluator_ema.evaluate(test_loader_target,
                                               dataset_target.query,
                                               dataset_target.gallery,
                                               cmc_flag=False)
                is_best = mAP_1 > best_mAP
                best_mAP = max(mAP_1, best_mAP)

                save_model(model_ema, is_best, best_mAP, phase + 1)
                print(
                    '\n * Finished phase {:3d} epoch {:3d}  model no.1 mAP: {:5.1%}  best: {:5.1%}{}\n'
                    .format(phase, epoch, mAP_1, best_mAP,
                            ' *' if is_best else ''))

        # Update the exemplars
        # Load the best model into model_ema
        print('update proto_dataset')
        best_weights = load_checkpoint(
            osp.join(args.init,
                     'phase{}_model_best.pth.tar'.format(phase + 1)))
        copy_state_dict(best_weights['state_dict'], model_ema)
        # Extract features and cluster them
        dict_f, _, dic_logit = extract_features(model_ema,
                                                tar_cluster_loader,
                                                print_freq=40)
        cf = torch.stack(list(dict_f.values()))  # already L2-normalized
        agent_sim = torch.stack(list(dic_logit.values()))

        agent_sim = agent_sim[:, :old_class_num]  # keep old-class logits; dim 1 should equal old_class_num
        agent_sim = F.softmax(agent_sim, dim=1)
        agent_sim_dist = torch.cdist(agent_sim, agent_sim, p=1) / 2
        agent_sim_dist = agent_sim_dist.numpy()
        rerank_dist = compute_jaccard_dist(
            cf, use_gpu=True).numpy()  # re-ranked distances lie in [0, 1]; args.rr_gpu
        lambda_a = 0.3 if phase == 0 else 0
        rerank_dist = (1 - lambda_a) * rerank_dist + lambda_a * agent_sim_dist
        # dict_f, _,_= extract_features(model_ema, tar_cluster_loader, print_freq=40)
        # cf = torch.stack(list(dict_f.values()))
        # rerank_dist = compute_jaccard_dist(cf, use_gpu=args.rr_gpu).numpy()

        # build the DBSCAN clusterer
        if epoch == 0:
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            rho = 2e-3
            top_num = np.round(rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps for cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=-1)

        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - (1 if -1 in labels else 0)
        args.num_clusters = num_ids
        print('phase {}, clustered into {} exemplar classes'.format(
            phase, args.num_clusters))
        # Collect all features of each class
        class_features = collections.defaultdict(list)
        image_index = collections.defaultdict(list)
        for i, label in enumerate(labels):
            if label == -1:
                continue
            class_features[label].append(cf[i])
            image_index[label].append(i)
        class_features = [
            torch.stack(class_features[idx])
            for idx in sorted(class_features.keys())
        ]  # list whose elements are the stacked features of each class
        image_index = [image_index[idx] for idx in sorted(image_index.keys())]
        tmp_dataset = sorted(input_dataset)
        #example_dataset, class_remain = select_proto(class_features, image_index, tmp_dataset, old_class_num=old_class_num)
        example_dataset, class_remain = select_proto(
            class_features,
            image_index,
            tmp_dataset,
            old_class_num=old_class_num,
            mode='herd',
            delete_ratio=0.05)
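        # mode='herd' presumably selects exemplars closest to the class mean
        # (iCaRL-style herding), dropping the farthest delete_ratio fraction.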
        nmi = eval_nmi(example_dataset)
        print('NMI of phase{} is {} '.format(phase, nmi))

        # Save the current proto_dataset
        proto_dataset.extend(example_dataset)
        proto_dataset_save = osp.join(args.logs_dir,
                                      'phase{}_proto.pkl'.format(phase))
        savepickle(proto_dataset, proto_dataset_save)

        # Update the classifier (FC) weights with the latest class means
        class_features = torch.stack(
            [torch.mean(features, dim=0) for features in class_remain])
        class_features_avg = F.normalize(class_features[:, :2048],
                                         dim=1).float().cuda()
        class_features_max = F.normalize(class_features[:, 2048:],
                                         dim=1).float().cuda()
        in_features = model_ema.module.classifier.in_features
        if isinstance(model_ema.module.classifier, CosineLinear):
            new_fc_avg = CosineLinear(in_features,
                                      len(class_features),
                                      sigma=True)
            new_fc_avg.weight.data.copy_(class_features_avg)
            # new_fc.sigma.data = model_ema.module.classifier.sigma.data
            model_ema.module.classifier = new_fc_avg

            new_fc_max = CosineLinear(in_features,
                                      len(class_features),
                                      sigma=True)
            new_fc_max.weight.data.copy_(class_features_max)
            # new_fc_max.sigma.data = model_ema.module.classifier_max.sigma.data
            model_ema.module.classifier_max = new_fc_max

        else:
            new_fc_avg = CosineLinear(in_features,
                                      len(class_features),
                                      sigma=False)
            new_fc_avg.weight.data.copy_(class_features_avg)
            model_ema.module.classifier.fc2 = new_fc_avg

            new_fc_max = CosineLinear(in_features,
                                      len(class_features),
                                      sigma=False)
            new_fc_max.weight.data.copy_(class_features_max)
            model_ema.module.classifier_max.fc2 = new_fc_max

        state = {
            'state_dict': model_ema.state_dict(),
            'phase': best_weights['phase'],
            'epoch': best_weights['epoch'],
            'best_mAP': best_weights['best_mAP'],
        }

        torch.save(
            state,
            osp.join(args.logs_dir,
                     'phase{}_model_best.pth.tar'.format(phase + 1)))
Example #4
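# Unsupervised domain adaptation loop: each epoch clusters target features with
# DBSCAN, re-initializes the avg/max classifiers from the cluster centers, and
# trains the student/teacher pair on the pseudo-labeled subset.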
def main_worker(args):
    global start_epoch, best_mAP

    cudnn.benchmark = True

    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters>0) else None
    dataset_target = get_data(args.dataset_target, args.data_dir)
    ori_train = dataset_target.train
    if not args.no_source:
        dataset_source = get_data(args.dataset_source, args.data_dir)
    test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)

    # Create model
    model_1, model_1_ema = create_model(args)

    # Evaluator
    evaluator_1_ema = Evaluator(model_1_ema)

    best_mAP = 0

    for nc in range(args.epochs):

        cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
                                         testset=dataset_target.train)
        dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50)
        cf_1 = torch.stack(list(dict_f.values()))

        # DBSCAN cluster
        if args.no_source:
            rerank_dist = compute_jaccard_dist(cf_1, lambda_value=0, source_features=None,
                                               use_gpu=False).numpy()
        else:
            cluster_loader_source = get_test_loader(dataset_source, args.height, args.width, args.batch_size,
                                                    args.workers, testset=dataset_source.train)
            dict_f_source, _ = extract_features(model_1_ema, cluster_loader_source, print_freq=50)
            cf_1_source = torch.stack(list(dict_f_source.values()))
            rerank_dist = compute_jaccard_dist(cf_1, lambda_value=args.lambda_value, source_features=cf_1_source,
                                               use_gpu=False).numpy()
            del cf_1_source
        tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
        tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
        tri_mat = np.sort(tri_mat, axis=None)
        top_num = np.round(args.rho * tri_mat.size).astype(int)
        eps = tri_mat[:top_num].mean()
        print('eps in cluster: {:.3f}'.format(eps))
        print('Clustering and labeling...')
        cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - (1 if -1 in labels else 0)

        print('Epoch {} has {} training ids'.format(nc, num_ids))
        # generate new dataset
        labeled_ind, unlabeled_ind = [], []
        for ind, label in enumerate(labels):
            if label == -1:
                unlabeled_ind.append(ind)
            else:
                labeled_ind.append(ind)
        # print('Epoch {} have {} labeled samples and {} unlabeled samples'.format(nc + 1, len(labeled_ind), len(unlabeled_ind)))

        cf_1 = cf_1.numpy()
        centers = []
        for id in range(num_ids):
            centers.append(np.mean(cf_1[labels == id], axis=0))
        centers = np.stack(centers, axis=0)

        del cf_1, rerank_dist

        model_1.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
        model_1_ema.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
        model_1.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()
        model_1_ema.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()

        model_1.module.classifier.weight.data.copy_(
            torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())
        model_1_ema.module.classifier.weight.data.copy_(
            torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())

        model_1.module.classifier_max.weight.data.copy_(
            torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())
        model_1_ema.module.classifier_max.weight.data.copy_(
            torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())

        del centers

        target_label = labels

        for i in range(len(dataset_target.train)):
            dataset_target.train[i] = list(dataset_target.train[i])
            dataset_target.train[i][1] = int(target_label[i])
            dataset_target.train[i] = tuple(dataset_target.train[i])

        # Optimizer
        params = []
        for key, value in model_1.named_parameters():
            if not value.requires_grad:
                continue
            params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]

        optimizer = torch.optim.Adam(params)

        # Trainer
        trainer = ABMTTrainer(model_1, model_1_ema, num_cluster=num_ids, alpha=args.alpha)
        epoch = nc
        # # DBSCAN
        dataset_target.train = [ori_train[i] for i in labeled_ind]
        print(len(dataset_target.train), 'are labeled.')
        labeled_loader_target = get_train_loader(dataset_target, args.height, args.width,
                                               args.batch_size, args.workers, args.num_instances, iters, mutual=True)
        labeled_loader_target.new_epoch()

        trainer.train(epoch, labeled_loader_target, optimizer,
                    print_freq=args.print_freq, train_iters=len(labeled_loader_target))

        def save_model(model_ema, is_best, best_mAP, mid, num_ids):
            save_checkpoint({
                'state_dict': model_ema.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
                'num_ids': num_ids
            }, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar'))

        if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
            print('Evaluating teacher net:')
            cmc, mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
            is_best = (mAP_1>best_mAP)
            best_mAP = max(mAP_1, best_mAP)

            save_model(model_1_ema, is_best, best_mAP, 1, num_ids)
            dataset_target.train = ori_train
    print('Test on the best model.')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model_best = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=checkpoint['num_ids'])
    model_best.cuda()
    model_best = nn.DataParallel(model_best)
    evaluator_best = Evaluator(model_best)
    model_best.load_state_dict(checkpoint['state_dict'])
    evaluator_best.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
Example #5
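# Source-domain pre-training: supervised training on the labeled source set with
# periodic evaluation, followed by a final test on the target domain.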
def main_worker(args):
    global start_epoch, best_mAP

    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    else:
        log_dir = osp.dirname(args.resume)
        sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    iters = args.iters if (args.iters>0) else None
    dataset_source, num_classes, train_loader_source, test_loader_source = \
        get_data(args.dataset_source, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, args.num_instances, iters)

    dataset_target, _, train_loader_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers, 0, iters)

    # Create model

    model = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=num_classes)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        copy_state_dict(checkpoint['state_dict'], model)
        start_epoch = checkpoint['epoch']
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}"
              .format(start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test on source domain:")
        evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True, rerank=args.rerank)
        print("Test on target domain:")
        evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
        return

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
    optimizer = torch.optim.Adam(params)
    lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01, warmup_iters=args.warmup_step)

    # Trainer
    trainer = ABMTPreTrainer(model, num_classes, margin=args.margin)

    # Start training
    for epoch in range(start_epoch, args.epochs):

        train_loader_source.new_epoch()
        train_loader_target.new_epoch()

        trainer.train(epoch, train_loader_source, train_loader_target, optimizer,
                    train_iters=len(train_loader_source), print_freq=args.print_freq)
        lr_scheduler.step()
        if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):

            _, mAP = evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            print('\n * Finished epoch {:3d}  source mAP: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))

    print("Test on target domain:")
    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)