Exemplo n.º 1
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_test",
         ckpt_path="/home/fanyang/PycharmProjects/PersonReID_CL/ckptseq/model-v1.pkl"):
    """Evaluate a sequence-level MixModel on the MARS test split.

    Args:
        data_root: directory holding the MARS ``bbox_test`` images.
        ckpt_path: pickled state-dict checkpoint to restore the model from.
    """
    # build the query / gallery datasets used for validation
    queries_dataset, gallery_dataset = get_dataset_val(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/id2folderVal.txt'),
        seq_len=SEQ_LEN)

    # prepare the model and restore the trained weights
    model = MixModel(num_classes=625, batch_size=BATCH_SIZE, seq_len=SEQ_LEN,
                     transform_input=True)
    model.load_state_dict(torch.load(ckpt_path))
    model.cuda()

    # evaluation loaders: keep sample order stable and keep every sample
    queries_loader = get_loader(dataset=queries_dataset, batch_size=BATCH_SIZE,
                                shuffle=False, drop_last=False)
    gallery_loader = get_loader(dataset=gallery_dataset, batch_size=BATCH_SIZE,
                                shuffle=False, drop_last=False)

    query_test_count = gallery_dataset.get_query_test_count()

    validation(queries_loader=queries_loader, gallery_loader=gallery_loader,
               model=model, query_test_count=query_test_count)
Exemplo n.º 2
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_test",
         ckpt_path="/home/fanyang/PycharmProjects/PersonReID_CL/ckpt/no-cl-model-inceptionv2/record-step-2109-model.pkl"):
    """Evaluate an Inceptionv2 backbone on the MARS test split.

    The classifier head is dropped after loading the weights so the model
    emits penultimate-layer features for retrieval.

    Args:
        data_root: directory holding the MARS ``bbox_test`` images.
        ckpt_path: pickled state-dict checkpoint to restore the model from.
    """
    # build the query / gallery datasets used for validation
    queries_dataset, gallery_dataset = get_dataset_val(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/id2folderVal.txt'),
        seq_len=SEQ_LEN)

    # prepare the model and restore the trained weights
    model = Inceptionv2()
    model.load_state_dict(torch.load(ckpt_path))

    # discard the classification head: validation uses features, not logits
    model.classifier = None
    model.cuda()

    # evaluation loaders: keep sample order stable and keep every sample
    queries_loader = get_loader(dataset=queries_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                drop_last=False)
    gallery_loader = get_loader(dataset=gallery_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                drop_last=False)

    query_test_count = gallery_dataset.get_query_test_count()

    validation(queries_loader=queries_loader,
               gallery_loader=gallery_loader,
               model=model,
               query_test_count=query_test_count)
Exemplo n.º 3
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_train",
         ckpt_path="/home/fanyang/PycharmProjects/PersonReID_CL/ckpt/no-cl-model-resnet50/last-layer-finetuned-model.pkl"):
    """Fine-tune a ResNet-50 with cross-entropy + Siamese loss on MARS.

    Trains indefinitely (one epoch per loop iteration); checkpoints and
    TensorBoard records are written by the Routine2Criteion helper.

    Args:
        data_root: directory holding the MARS ``bbox_train`` images.
        ckpt_path: checkpoint to warm-start from (loaded via the project's
            ``load_state_dict_diy``).
    """
    # train / validation splits come from the same image root, different lists
    train_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/train.txt'))
    val_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/test.txt'))

    # prepare model (weights come from the checkpoint, not torchvision)
    resnet = resnet50(pretrained=False)

    # two criteria: classification loss plus a metric-learning Siamese loss
    resnet.criterion = nn.CrossEntropyLoss()
    resnet.criterion2 = SiameseLoss()

    # replace the head to match the 625 MARS identities, then warm-start
    resnet.fc = nn.Linear(in_features=2048, out_features=625)
    resnet.load_state_dict_diy(torch.load(ckpt_path))

    # SGD over all parameters for the fine-tuning phase
    resnet.optimizer = optim.SGD(params=resnet.parameters(), lr=5e-4)

    resnet.cuda()

    # record the training process
    writer_dir = "ckpt/siamese-model-resnet50"
    saver_dir = writer_dir
    writer = SummaryWriter(log_dir=os.path.join(Proj_Dir, writer_dir))

    routine = Routine2Criteion(model=resnet,
                               saver_dir=saver_dir,
                               writer=writer)

    # NOTE(review): intentional infinite training loop — stop externally.
    while True:
        # rebuild loaders each epoch (train loader reshuffles by default)
        train_loader = get_loader(dataset=train_dataset, batch_size=BATCH_SIZE)
        val_loader = get_loader(dataset=val_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                drop_last=False)

        routine.train_one_epoch(loader=train_loader,
                                record_n_times_per_epoch=400)

        # adjust the learning rate once per epoch
        adjust_learning_rate(resnet.optimizer)

        routine.validation(loader=val_loader)
Exemplo n.º 4
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_train"):
    """Train an Inceptionv2 with cross-entropy + center loss on MARS.

    Trains indefinitely (one epoch per loop iteration); checkpoints and
    TensorBoard records are written by the Routine2Criteion helper.

    Args:
        data_root: directory holding the MARS ``bbox_train`` images.
    """
    # train / validation splits come from the same image root, different lists
    train_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/train.txt'))
    val_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/test.txt'))

    # prepare model, trained from scratch (no checkpoint is loaded here)
    model = Inceptionv2()

    # two criteria: classification loss plus a center loss on 1024-d features
    model.criterion = nn.CrossEntropyLoss()
    model.criterion2 = CenterLoss(feature_len=1024, num_classes=625)

    model.cuda()
    model.optimizer = optim.Adam(params=model.parameters(), lr=1e-3)

    # record the training process
    writer_dir = "ckpt/cl-model-inceptionv2"
    saver_dir = writer_dir
    writer = SummaryWriter(log_dir=os.path.join(Proj_Dir, writer_dir))

    routine = Routine2Criteion(model=model, saver_dir=saver_dir, writer=writer)

    # NOTE(review): intentional infinite training loop — stop externally.
    while True:
        # rebuild loaders each epoch (train loader reshuffles by default)
        train_loader = get_loader(dataset=train_dataset, batch_size=BATCH_SIZE)
        val_loader = get_loader(dataset=val_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                drop_last=False)

        routine.train_one_epoch(loader=train_loader,
                                record_n_times_per_epoch=400)

        # adjust the learning rate once per epoch
        adjust_learning_rate(model.optimizer)

        routine.validation(loader=val_loader)
Exemplo n.º 5
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_train",
         ckpt_path="/home/fanyang/PycharmProjects/PersonReID_CL/ckpt/model-inceptionv3-transform-input.pkl"):
    """Extract Inception-v3 penultimate features and log an embedding.

    Loads a fine-tuned Inception-v3, drops its final FC layer so the model
    outputs 2048-d features, and runs validation that writes an embedding
    projection to TensorBoard.

    Args:
        data_root: directory holding the MARS ``bbox_train`` images.
        ckpt_path: pickled state-dict checkpoint to restore the model from.
    """
    val_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/test.txt'))

    # prepare model; the checkpoint was trained with transform_input=True
    inception = inception_v3(pretrained=False,
                             aux_logits=False,
                             transform_input=True)

    # the checkpoint has a 625-way head, so the layer must exist when loading
    inception.fc = nn.Linear(in_features=2048, out_features=625)
    inception.load_state_dict(torch.load(ckpt_path))

    # drop the last layer so forward() yields the penultimate layer's feature
    inception.fc = None

    inception.cuda()

    writer_for_embedding = SummaryWriter(
        log_dir=os.path.join(Proj_Dir, "ckpt/cnn_embedding"))

    # shuffle=True gives a varied sample of identities in the embedding plot
    val_loader = get_loader(dataset=val_dataset,
                            batch_size=BATCH_SIZE,
                            shuffle=True,
                            drop_last=False)

    validation(loader=val_loader, model=inception, writer=writer_for_embedding)
Exemplo n.º 6
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_train"):
    """Train an Inceptionv2 with plain cross-entropy loss on MARS.

    Trains indefinitely (one epoch per loop iteration); checkpoints and
    TensorBoard records are written by the Routine helper.

    Args:
        data_root: directory holding the MARS ``bbox_train`` images.
    """
    # train / validation splits come from the same image root, different lists
    train_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/train.txt'))
    val_dataset = get_dataset(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/test.txt'))

    # prepare model, trained from scratch (no checkpoint is loaded here)
    inceptionv2 = Inceptionv2()

    inceptionv2.criterion = nn.CrossEntropyLoss()

    inceptionv2.cuda()

    inceptionv2.optimizer = optim.Adam(params=inceptionv2.parameters(),
                                       lr=1e-3)

    # record the training process
    writer_dir = "ckpt/no-cl-model-inceptionv2"
    saver_dir = writer_dir
    writer = SummaryWriter(log_dir=os.path.join(Proj_Dir, writer_dir))

    routine = Routine(model=inceptionv2, saver_dir=saver_dir, writer=writer)

    # NOTE(review): intentional infinite training loop — stop externally.
    while True:
        # rebuild loaders each epoch (train loader reshuffles by default)
        train_loader = get_loader(dataset=train_dataset, batch_size=BATCH_SIZE)
        val_loader = get_loader(dataset=val_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                drop_last=False)

        routine.train_one_epoch(loader=train_loader,
                                record_n_times_per_epoch=400)

        # adjust the learning rate once per epoch
        adjust_learning_rate(inceptionv2.optimizer)

        routine.validation(loader=val_loader)
Exemplo n.º 7
0
def main(data_root="/media/fanyang/workspace/DataSet/MARS/bbox_train",
         ckpt_path="/home/fanyang/PycharmProjects/PersonReID_CL/ckptseq/model-v1.pkl"):
    """Train the LSTM/FC head of a sequence-level MixModel on MARS.

    The Inception backbone is frozen; only the ``inception_lstm`` and ``fc``
    parameters are optimized. Trains indefinitely (one epoch per loop
    iteration), logging to TensorBoard.

    Args:
        data_root: directory holding the MARS ``bbox_train`` images.
        ckpt_path: pickled state-dict checkpoint to restore the model from.
    """
    # sequence dataset: samples are fixed-length tracklets of SEQ_LEN frames
    train_dataset = get_dataset_seq(
        root=data_root,
        txt_file=os.path.join(Proj_Dir, 'data/id2folder.txt'),
        seq_len=SEQ_LEN)

    # prepare the model and restore the trained weights
    model = MixModel(num_classes=625,
                     batch_size=BATCH_SIZE,
                     seq_len=SEQ_LEN,
                     transform_input=True)
    model.load_state_dict(torch.load(ckpt_path))

    # freeze the Inception backbone; only the sequence head is fine-tuned
    for param in model.inception.parameters():
        param.requires_grad = False

    # collect only the parameters that remain trainable
    parameters_need_to_train = list(model.inception_lstm.parameters())
    parameters_need_to_train.extend(model.fc.parameters())

    model.criterion = nn.CrossEntropyLoss()
    model.optimizer = optim.SGD(params=parameters_need_to_train, lr=1e-4)
    model.cuda()

    writer = SummaryWriter(log_dir=os.path.join(Proj_Dir, "ckpt/seq-sgd-1e-4"))

    # NOTE(review): intentional infinite training loop — stop externally.
    while True:
        # rebuild the loader each epoch (reshuffles by default)
        train_loader = get_loader(dataset=train_dataset, batch_size=BATCH_SIZE)

        train_one_epoch(loader=train_loader, model=model, writer=writer)

        # decay the learning rate once per epoch
        adjust_learning_rate(model.optimizer, decay_rate=.9)