Example #1
    def __init__(self):

        parser = argparse.ArgumentParser()
        parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0, ))
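        # NOTE: type=eval lets the shell pass Python literals such as "(0,)",
        # but eval executes arbitrary input; ast.literal_eval from the
        # standard library is the safer choice for parsing literals.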
        parser.add_argument('-r', '--run', type=int, default=1)
        parser.add_argument('--set_seed', type=str2bool, default=False)
        parser.add_argument(
            '--dataset',
            type=str,
            default='market1501',
            choices=['market1501', 'cuhk03', 'duke', 'combined', 'customized'])
        parser.add_argument('--trainset_part',
                            type=str,
                            default='trainval',
                            choices=['trainval', 'train'])
        parser.add_argument('--customized_folder_path',
                            type=str,
                            default='customized')
        parser.add_argument('--resize_h_w', type=eval, default=(256, 128))
        # The following options apply only to the training set
        parser.add_argument('--crop_prob', type=float, default=0)
        parser.add_argument('--crop_ratio', type=float, default=1)
        parser.add_argument('--mirror', type=str2bool, default=True)
        parser.add_argument('--batch_size', type=int, default=64)

        parser.add_argument('--log_to_file', type=str2bool, default=True)
        parser.add_argument('--steps_per_log', type=int, default=20)
        parser.add_argument('--epochs_per_val', type=int, default=1)

        parser.add_argument('--last_conv_stride',
                            type=int,
                            default=1,
                            choices=[1, 2])
        # When the stride is changed to 1, we can compensate for the receptive field
        # using dilated convolution. However, experiments show dilated convolution is useless.
        parser.add_argument('--last_conv_dilation',
                            type=int,
                            default=1,
                            choices=[1, 2])
        parser.add_argument('--num_stripes', type=int, default=6)
        parser.add_argument('--local_conv_out_channels', type=int, default=256)

        parser.add_argument('--only_test', type=str2bool, default=False)
        parser.add_argument('--resume', type=str2bool, default=False)
        parser.add_argument('--exp_dir', type=str, default='')
        parser.add_argument('--model_weight_file', type=str, default='')

        parser.add_argument('--new_params_lr', type=float, default=0.1)
        parser.add_argument('--finetuned_params_lr', type=float, default=0.01)
        parser.add_argument('--staircase_decay_at_epochs',
                            type=eval,
                            default=(41, ))
        parser.add_argument('--staircase_decay_multiply_factor',
                            type=float,
                            default=0.1)
        parser.add_argument('--total_epochs', type=int, default=60)

        args = parser.parse_args()

        # gpu ids
        self.sys_device_ids = args.sys_device_ids

        # If you want to make your results exactly reproducible, you have
        # to fix a random seed.
        if args.set_seed:
            self.seed = 1
        else:
            self.seed = None

        # The experiment can be run several times and the performances averaged.
        # `run` starts from `1`, not `0`.
        self.run = args.run

        ###########
        # Dataset #
        ###########

        # If you want to make your results exactly reproducible, you have
        # to also set the number of threads to 1 during training.
        if self.seed is not None:
            self.prefetch_threads = 1
        else:
            self.prefetch_threads = 2

        self.dataset = args.dataset
        self.trainset_part = args.trainset_part

        # Image Processing

        # Just for training set
        self.crop_prob = args.crop_prob
        self.crop_ratio = args.crop_ratio
        self.resize_h_w = args.resize_h_w

        # Whether to scale by 1/255
        self.scale_im = True
        self.im_mean = [0.486, 0.459, 0.408]
        self.im_std = [0.229, 0.224, 0.225]
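        # Presumably applied downstream as (pixel / 255. - mean) / std per
        # channel; the values approximate the usual ImageNet statistics.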

        self.train_mirror_type = 'random' if args.mirror else None
        self.train_batch_size = args.batch_size
        self.train_final_batch = False
        self.train_shuffle = True

        self.test_mirror_type = None
        self.test_batch_size = 32
        self.test_final_batch = True
        self.test_shuffle = False
        self.customized_folder_path = args.customized_folder_path

        dataset_kwargs = dict(
            name=self.dataset,
            resize_h_w=self.resize_h_w,
            scale=self.scale_im,
            im_mean=self.im_mean,
            im_std=self.im_std,
            batch_dims='NCHW',
            customized_folder_path=self.customized_folder_path,
            num_prefetch_threads=self.prefetch_threads)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.train_set_kwargs = dict(part=self.trainset_part,
                                     batch_size=self.train_batch_size,
                                     final_batch=self.train_final_batch,
                                     shuffle=self.train_shuffle,
                                     crop_prob=self.crop_prob,
                                     crop_ratio=self.crop_ratio,
                                     mirror_type=self.train_mirror_type,
                                     prng=prng)
        self.train_set_kwargs.update(dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.val_set_kwargs = dict(part='val',
                                   batch_size=self.test_batch_size,
                                   final_batch=self.test_final_batch,
                                   shuffle=self.test_shuffle,
                                   mirror_type=self.test_mirror_type,
                                   prng=prng)
        self.val_set_kwargs.update(dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.test_set_kwargs = dict(part='test',
                                    batch_size=self.test_batch_size,
                                    final_batch=self.test_final_batch,
                                    shuffle=self.test_shuffle,
                                    mirror_type=self.test_mirror_type,
                                    prng=prng)
        self.test_set_kwargs.update(dataset_kwargs)

        ###############
        # ReID Model  #
        ###############

        # The last block of ResNet has stride 2. We can set the stride to 1 so that
        # the spatial resolution before global pooling is doubled.
        self.last_conv_stride = args.last_conv_stride
        # When the stride is changed to 1, we can compensate for the receptive field
        # using dilated convolution. However, experiments show dilated convolution is useless.
        self.last_conv_dilation = args.last_conv_dilation
        # Number of stripes (parts)
        self.num_stripes = args.num_stripes
        # Output channels of the 1x1 conv
        self.local_conv_out_channels = args.local_conv_out_channels

        #############
        # Training  #
        #############

        self.momentum = 0.9
        self.weight_decay = 0.0005

        # Initial learning rate
        self.new_params_lr = args.new_params_lr
        self.finetuned_params_lr = args.finetuned_params_lr
        self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
        self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
        # Number of epochs to train
        self.total_epochs = args.total_epochs

        # How often (in epochs) to test on val set.
        self.epochs_per_val = args.epochs_per_val

        # How often (in batches) to log. If you only need to log the average
        # information for each epoch, set this to a large value, e.g. 1e10.
        self.steps_per_log = args.steps_per_log

        # Only run testing, without training.
        self.only_test = args.only_test

        self.resume = args.resume

        #######
        # Log #
        #######

        # If True,
        # 1) stdout and stderr will be redirected to file,
        # 2) training loss etc will be written to tensorboard,
        # 3) checkpoint will be saved
        self.log_to_file = args.log_to_file

        # The root dir of logs.
        if args.exp_dir == '':
            self.exp_dir = osp.join(
                'exp/train',
                '{}'.format(self.dataset),
                'run{}'.format(self.run),
            )
        else:
            self.exp_dir = args.exp_dir

        self.stdout_file = osp.join(self.exp_dir,
                                    'stdout_{}.txt'.format(time_str()))
        self.stderr_file = osp.join(self.exp_dir,
                                    'stderr_{}.txt'.format(time_str()))

        # Saving model weights and optimizer states, for resuming.
        self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
        # Just for loading a pretrained model; no optimizer state is needed.
        self.model_weight_file = args.model_weight_file
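For reference, the snippet assumes several names it does not define: argparse, np (NumPy), osp (os.path), and two small project helpers, str2bool and time_str. A minimal sketch of those helpers (the project's own implementations may differ):

# Imports assumed by the example above.
import argparse
import os.path as osp
import time

import numpy as np


def str2bool(v):
    # Parse command-line strings such as 'true'/'false' into a bool.
    return str(v).lower() in ('yes', 'true', 't', '1')


def time_str(fmt='%Y-%m-%d_%H-%M-%S'):
    # Timestamp used to name the stdout/stderr log files.
    return time.strftime(fmt)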
Example #2
  def __init__(self):

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0,))
    parser.add_argument('-r', '--run', type=int, default=1)
    parser.add_argument('--set_seed', type=str2bool, default=False)
    parser.add_argument('--dataset', type=str, default='market1501',
                        choices=['market1501', 'cuhk03', 'duke', 'combined'])
    parser.add_argument('--trainset_part', type=str, default='trainval',
                        choices=['trainval', 'train'])

    parser.add_argument('--resize_h_w', type=eval, default=(384, 128))
    # The following options apply only to the training set
    parser.add_argument('--crop_prob', type=float, default=0)
    parser.add_argument('--crop_ratio', type=float, default=1)
    parser.add_argument('--mirror', type=str2bool, default=True)
    parser.add_argument('--batch_size', type=int, default=64)

    parser.add_argument('--log_to_file', type=str2bool, default=True)
    parser.add_argument('--steps_per_log', type=int, default=20)
    parser.add_argument('--epochs_per_val', type=int, default=1)

    parser.add_argument('--last_conv_stride', type=int, default=1, choices=[1, 2])
    # When the stride is changed to 1, we can compensate for the receptive field
    # using dilated convolution. However, experiments show dilated convolution is useless.
    parser.add_argument('--last_conv_dilation', type=int, default=1, choices=[1, 2])
    parser.add_argument('--num_stripes', type=int, default=6)
    parser.add_argument('--local_conv_out_channels', type=int, default=256)

    parser.add_argument('--only_test', type=str2bool, default=False)
    parser.add_argument('--resume', type=str2bool, default=False)
    parser.add_argument('--exp_dir', type=str, default='')
    parser.add_argument('--model_weight_file', type=str, default='')

    parser.add_argument('--new_params_lr', type=float, default=0.1)
    parser.add_argument('--finetuned_params_lr', type=float, default=0.01)
    parser.add_argument('--staircase_decay_at_epochs',
                        type=eval, default=(41,))
    parser.add_argument('--staircase_decay_multiply_factor',
                        type=float, default=0.1)
    parser.add_argument('--total_epochs', type=int, default=60)

    args = parser.parse_args()

    # gpu ids
    self.sys_device_ids = args.sys_device_ids

    # If you want to make your results exactly reproducible, you have
    # to fix a random seed.
    if args.set_seed:
      self.seed = 1
    else:
      self.seed = None

    # The experiment can be run several times and the performances averaged.
    # `run` starts from `1`, not `0`.
    self.run = args.run

    ###########
    # Dataset #
    ###########

    # If you want to make your results exactly reproducible, you have
    # to also set the number of threads to 1 during training.
    if self.seed is not None:
      self.prefetch_threads = 1
    else:
      self.prefetch_threads = 2

    self.dataset = args.dataset
    self.trainset_part = args.trainset_part

    # Image Processing

    # Just for training set
    self.crop_prob = args.crop_prob
    self.crop_ratio = args.crop_ratio
    self.resize_h_w = args.resize_h_w

    # Whether to scale by 1/255
    self.scale_im = True
    self.im_mean = [0.486, 0.459, 0.408]
    self.im_std = [0.229, 0.224, 0.225]

    self.train_mirror_type = 'random' if args.mirror else None
    self.train_batch_size = args.batch_size
    self.train_final_batch = False
    self.train_shuffle = True

    self.test_mirror_type = None
    self.test_batch_size = 32
    self.test_final_batch = True
    self.test_shuffle = False

    dataset_kwargs = dict(
      name=self.dataset,
      resize_h_w=self.resize_h_w,
      scale=self.scale_im,
      im_mean=self.im_mean,
      im_std=self.im_std,
      batch_dims='NCHW',
      num_prefetch_threads=self.prefetch_threads)

    prng = np.random
    if self.seed is not None:
      prng = np.random.RandomState(self.seed)
    self.train_set_kwargs = dict(
      part=self.trainset_part,
      batch_size=self.train_batch_size,
      final_batch=self.train_final_batch,
      shuffle=self.train_shuffle,
      crop_prob=self.crop_prob,
      crop_ratio=self.crop_ratio,
      mirror_type=self.train_mirror_type,
      prng=prng)
    self.train_set_kwargs.update(dataset_kwargs)

    prng = np.random
    if self.seed is not None:
      prng = np.random.RandomState(self.seed)
    self.val_set_kwargs = dict(
      part='val',
      batch_size=self.test_batch_size,
      final_batch=self.test_final_batch,
      shuffle=self.test_shuffle,
      mirror_type=self.test_mirror_type,
      prng=prng)
    self.val_set_kwargs.update(dataset_kwargs)

    prng = np.random
    if self.seed is not None:
      prng = np.random.RandomState(self.seed)
    self.test_set_kwargs = dict(
      part='test',
      batch_size=self.test_batch_size,
      final_batch=self.test_final_batch,
      shuffle=self.test_shuffle,
      mirror_type=self.test_mirror_type,
      prng=prng)
    self.test_set_kwargs.update(dataset_kwargs)

    ###############
    # ReID Model  #
    ###############

    # The last block of ResNet has stride 2. We can set the stride to 1 so that
    # the spatial resolution before global pooling is doubled.
    self.last_conv_stride = args.last_conv_stride
    # When the stride is changed to 1, we can compensate for the receptive field
    # using dilated convolution. However, experiments show dilated convolution is useless.
    self.last_conv_dilation = args.last_conv_dilation
    # Number of stripes (parts)
    self.num_stripes = args.num_stripes
    # Output channels of the 1x1 conv
    self.local_conv_out_channels = args.local_conv_out_channels

    #############
    # Training  #
    #############

    self.momentum = 0.9
    self.weight_decay = 0.0005

    # Initial learning rate
    self.new_params_lr = args.new_params_lr
    self.finetuned_params_lr = args.finetuned_params_lr
    self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
    self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
    # Number of epochs to train
    self.total_epochs = args.total_epochs

    # How often (in epochs) to test on val set.
    self.epochs_per_val = args.epochs_per_val

    # How often (in batches) to log. If you only need to log the average
    # information for each epoch, set this to a large value, e.g. 1e10.
    self.steps_per_log = args.steps_per_log

    # Only run testing, without training.
    self.only_test = args.only_test

    self.resume = args.resume

    #######
    # Log #
    #######

    # If True,
    # 1) stdout and stderr will be redirected to file,
    # 2) training loss etc will be written to tensorboard,
    # 3) checkpoint will be saved
    self.log_to_file = args.log_to_file

    # The root dir of logs.
    if args.exp_dir == '':
      self.exp_dir = osp.join(
        'exp/train',
        '{}'.format(self.dataset),
        'run{}'.format(self.run),
      )
    else:
      self.exp_dir = args.exp_dir

    self.stdout_file = osp.join(
      self.exp_dir, 'stdout_{}.txt'.format(time_str()))
    self.stderr_file = osp.join(
      self.exp_dir, 'stderr_{}.txt'.format(time_str()))

    # Saving model weights and optimizer states, for resuming.
    self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
    # Just for loading a pretrained model; no optimizer state is needed.
    self.model_weight_file = args.model_weight_file
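Because every field comes from argparse, the whole configuration is overridable from the command line. Assuming the method above is the __init__ of a class named Config (the class name is not shown in the snippet, so it is an assumption), usage might look like:

# Hypothetical driver; the class name `Config` is assumed, since the
# snippet only shows the __init__ body. Run e.g. as:
#   python train.py --dataset duke --total_epochs 80 --set_seed true
if __name__ == '__main__':
    cfg = Config()
    print(cfg.exp_dir)                         # exp/train/duke/run1
    print(cfg.train_set_kwargs['batch_size'])  # 64 unless --batch_size is set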
Example #3
    def __init__(self):

        parser = argparse.ArgumentParser()
        parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0, ))
        parser.add_argument('--dataset',
                            type=str,
                            default='market1501',
                            choices=['market1501', 'cuhk03', 'duke'])

        parser.add_argument('--num_queries', type=int, default=16)
        parser.add_argument('--rank_list_size', type=int, default=10)

        parser.add_argument('--resize_h_w', type=eval, default=(384, 128))
        parser.add_argument('--last_conv_stride',
                            type=int,
                            default=1,
                            choices=[1, 2])
        parser.add_argument('--num_stripes', type=int, default=6)
        parser.add_argument('--local_conv_out_channels', type=int, default=256)

        parser.add_argument('--log_to_file', type=str2bool, default=True)
        parser.add_argument('--exp_dir', type=str, default='')
        parser.add_argument('--ckpt_file', type=str, default='')
        parser.add_argument('--model_weight_file', type=str, default='')

        args = parser.parse_args()

        # gpu ids
        self.sys_device_ids = args.sys_device_ids

        self.num_queries = args.num_queries
        self.rank_list_size = args.rank_list_size

        ###########
        # Dataset #
        ###########

        self.dataset = args.dataset
        self.prefetch_threads = 2

        # Image Processing

        self.resize_h_w = args.resize_h_w

        # Whether to scale by 1/255
        self.scale_im = True
        self.im_mean = [0.486, 0.459, 0.408]
        self.im_std = [0.229, 0.224, 0.225]

        self.test_mirror_type = None
        self.test_batch_size = 32
        self.test_final_batch = True
        self.test_shuffle = False

        dataset_kwargs = dict(name=self.dataset,
                              resize_h_w=self.resize_h_w,
                              scale=self.scale_im,
                              im_mean=self.im_mean,
                              im_std=self.im_std,
                              batch_dims='NCHW',
                              num_prefetch_threads=self.prefetch_threads)

        prng = np.random
        self.test_set_kwargs = dict(part='test',
                                    batch_size=self.test_batch_size,
                                    final_batch=self.test_final_batch,
                                    shuffle=self.test_shuffle,
                                    mirror_type=self.test_mirror_type,
                                    prng=prng)
        self.test_set_kwargs.update(dataset_kwargs)

        ###############
        # ReID Model  #
        ###############

        # The last block of ResNet has stride 2. We can set the stride to 1 so that
        # the spatial resolution before global pooling is doubled.
        self.last_conv_stride = args.last_conv_stride
        # Number of stripes (parts)
        self.num_stripes = args.num_stripes
        # Output channels of the 1x1 conv
        self.local_conv_out_channels = args.local_conv_out_channels

        #######
        # Log #
        #######

        # If True, stdout and stderr will be redirected to file
        self.log_to_file = args.log_to_file

        # The root dir of logs.
        if args.exp_dir == '':
            self.exp_dir = osp.join(
                'exp/visualize_rank_list',
                '{}'.format(self.dataset),
            )
        else:
            self.exp_dir = args.exp_dir

        self.stdout_file = osp.join(self.exp_dir,
                                    'stdout_{}.txt'.format(time_str()))
        self.stderr_file = osp.join(self.exp_dir,
                                    'stderr_{}.txt'.format(time_str()))

        # Model weights and optimizer states, for resuming.
        self.ckpt_file = args.ckpt_file
        # Just for loading a pretrained model; no optimizer state is needed.
        self.model_weight_file = args.model_weight_file
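One detail of the kwargs pattern used throughout these examples: dict.update overwrites keys that already exist, so if a per-split dict and dataset_kwargs ever shared a key, the dataset_kwargs value would win. The key sets here are disjoint, but the semantics are worth knowing; a self-contained illustration:

split_kwargs = dict(part='test', batch_size=32)
shared_kwargs = dict(name='market1501', batch_size=16)  # overlapping key
split_kwargs.update(shared_kwargs)
print(split_kwargs['batch_size'])  # 16 -- the dict passed to update() wins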
Example #4
    def __init__(self, gpus, dataset, feat_dims_per_part,
                 only_test, loop, save_dir):
        self.run = loop
        self.seed = None
        if self.seed is not None:
            self.prefetch_threads = 1
        else:
            self.prefetch_threads = 2
        self.trainset_part = 'trainval'
        
        # gpu ids
        self.sys_device_ids = gpus

        # dataset name
        self.dataset = dataset

        # Just for training set
        self.crop_prob = 0
        self.crop_ratio = 1

        self.resize_h_w = (384, 128)
        self.scale_im = True
        self.im_mean = [0.486, 0.459, 0.408]
        self.im_std = [0.229, 0.224, 0.225]

        self.train_mirror_type = 'random'
        self.train_batch_size = 64
        self.train_final_batch = False
        self.train_shuffle = True

        self.test_mirror_type = None
        self.test_batch_size = 32
        self.test_final_batch = True
        self.test_shuffle = False

        ## Dataset
        dataset_kwargs = dict(
            name=self.dataset,
            resize_h_w=self.resize_h_w,
            scale=self.scale_im,
            im_mean=self.im_mean,
            im_std=self.im_std,
            batch_dims='NCHW',
            num_prefetch_threads=self.prefetch_threads)

        # Training dataset
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.train_set_kwargs = dict(
            part=self.trainset_part,
            batch_size=self.train_batch_size,
            final_batch=self.train_final_batch,
            shuffle=self.train_shuffle,
            crop_prob=self.crop_prob,
            crop_ratio=self.crop_ratio,
            mirror_type=self.train_mirror_type,
            prng=prng)
        self.train_set_kwargs.update(dataset_kwargs)
        # Validation dataset
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.val_set_kwargs = dict(
            part='val',
            batch_size=self.test_batch_size,
            final_batch=self.test_final_batch,
            shuffle=self.test_shuffle,
            mirror_type=self.test_mirror_type,
            prng=prng)
        self.val_set_kwargs.update(dataset_kwargs)
        self.val_set = None
        # Test dataset
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.test_set_kwargs = dict(
            part='test',
            batch_size=self.test_batch_size,
            final_batch=self.test_final_batch,
            shuffle=self.test_shuffle,
            mirror_type=self.test_mirror_type,
            prng=prng)
        self.test_set_kwargs.update(dataset_kwargs)

        ## ReID model
        self.last_conv_stride = 1
        self.last_conv_dilation = 1
        self.num_stripes = 6
        self.local_conv_out_channels = feat_dims_per_part
        
        ## Training
        self.momentum = 0.9
        self.weight_decay = 0.0005
        # Initial learning rate
        self.new_params_lr = 0.1
        self.finetuned_params_lr = 0.01
        self.staircase_decay_at_epochs = (41,)
        self.staircase_decay_multiply_factor = 0.1
        # Number of epochs to train
        self.total_epochs = 60
        # How often (in epochs) to test on val set.
        self.epochs_per_val = 1
        # How often (in batches) to log. If you only need to log the average
        # information for each epoch, set this to a large value, e.g. 1e10.
        self.steps_per_log = 20
        # Only run testing, without training.
        self.only_test = only_test

        self.resume = False

        self.log_to_file = True
        # The root dir of logs.
        assert loop > 0
        if save_dir == '':
            self.exp_dir = osp.join(
                'exp/train',
                '{}'.format(self.dataset),
                'run{}'.format(self.run),
            )
        else:
            self.exp_dir = osp.join(save_dir, str(loop))
        self.stdout_file = osp.join(
            self.exp_dir, 'stdout_{}.txt'.format(time_str()))
        self.stderr_file = osp.join(
            self.exp_dir, 'stderr_{}.txt'.format(time_str()))
        #ckpt_fname = 'ckpt' + str(loop) + '.pth'
        self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
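The staircase fields above encode a step learning-rate schedule: the rate is multiplied by staircase_decay_multiply_factor for each boundary in staircase_decay_at_epochs that the current epoch has passed. A minimal sketch of that rule (the project's real scheduler may handle the boundary epoch itself slightly differently):

def staircase_lr(base_lr, epoch, decay_at_epochs=(41,), factor=0.1):
    # Multiply by `factor` once for every boundary already reached.
    num_decays = sum(epoch >= e for e in decay_at_epochs)
    return base_lr * factor ** num_decays

print(staircase_lr(0.1, 40))  # 0.1   -- before the boundary
print(staircase_lr(0.1, 41))  # ~0.01 -- after the single decay step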
Example #5
    def __init__(self):

        parser = argparse.ArgumentParser()
        parser.add_argument('-d',
                            '--sys_device_ids',
                            type=eval,
                            default=(0, 1, 2, 3))
        parser.add_argument('-r', '--run', type=int, default=1)
        parser.add_argument('--set_seed', type=str2bool, default=False)
        parser.add_argument('--pcb_dataset', type=str, default='market1501',
                            choices=['market_png_4_1','market_png','market30_retain_pixel1','market30_retain_pixel2','market30_retain_pixel4',
                            'market30_retain_pixel5','market30_retain_pixel6',
                            'market30_retain_pixel7','market30_retain_pixel8','market30_retain_pixel9','market30_retain_pixel10',
                            'market30_retain_pixel3','cuhk33_retain_3','market30_retain_pixel0_3_1','mars30_retain_pixel7','mars32_retain_pixel7','mars33_retain_pixel7','market30_retain_2','market30_retain_3','market30_retain_pixel0_2','market30_retain_pixel0_3',
                            'market30_retain_pixel0','mars_oldmask_retain','mars','mars20','mars22','mars23','mars30','mars32','mars33','market',
                            'cuhk20','cuhk22','cuhk23','cuhk20_retain','cuhk22_retain','cuhk23_retain','cuhk30','cuhk32','cuhk33',
                            'cuhk30_retain','cuhk32_retain','cuhk33_retain','cuhk40','cuhk42','cuhk43','cuhk40_retain','cuhk42_retain',
                            'cuhk43_retain','market1501','market_combined','market23','market22', 'market20','market20_retain','market22_retain',
                            'market23_retain', 'market30','market32','market33','market30_retain','market32_retain','market33_retain',
                            'market40','market42','market43','market40_retain','market42_retain','market43_retain','market_oldmask',
                            'market_oldmask_retain','market_trans','market_png'])
        parser.add_argument('--triplet_dataset', type=str, default='market1501',
                            choices=['market_png_4_1','market_png','market30_retain_pixel3_rand_1','market30_retain_pixel1_4_1',
                            'market30_retain_pixel2_4_1','market30_retain_pixel4_4_1','market30_retain_pixel5_4_1',
                            'market30_retain_pixel6_4_1','market30_retain_pixel7_4_1','market30_retain_pixel8_4_1','market30_retain_pixel9_4_1',
                            'market30_retain_pixel10_4_1','market30_retain_rand_1','market30_retain_pixel3_3_1','market30_retain_pixel3_4_1',
                            'market30_retain_pixel3_5_3','market30_retain_pixel3_rand_1',
                            'cuhk33_retain_3_1','cuhk33_retain_4','cuhk33_retain_4_1','cuhk33_retain_5','cuhk33_retain_5_3','cuhk33_retain_5_6',
                            'market30_retain_3_1','market30_retain_4','market30_retain_4_1','market30_retain_5',
                            'market30_retain_5_3','market30_retain_5_6','market33_retain_5','market33_retain_5_3','market33_retain_5_6',
                            'market33_retain_3','market33_retain_3_1','market33_retain_4','market33_retain_4_1',
                            'market30_retain_pixel0_4_1','market30_retain_pixel0_5_6','market30_retain_pixel0_5_3','market30_retain_pixel0_5',
                            'market30_retain_pixel0_4_5','cuhk33_retain_3','market30_retain_pixel0_3_1','mars30_retain_pixel7','mars32_retain_pixel7',
                            'mars33_retain_pixel7','market30_retain_2','market30_retain_3','market30_retain_pixel0_2','market30_retain_pixel0_3',
                            'market30_retain_pixel0','mars_oldmask_retain','mars','mars20','mars22','mars23','mars30','mars32','mars33','market',
                            'cuhk20','cuhk22','cuhk23','cuhk20_retain','cuhk22_retain','cuhk23_retain','cuhk30','cuhk32','cuhk33',
                            'cuhk30_retain','cuhk32_retain','cuhk33_retain','cuhk40','cuhk42','cuhk43','cuhk40_retain','cuhk42_retain',
                            'cuhk43_retain','market1501','market_combined','market23','market22', 'market20','market20_retain','market22_retain',
                            'market23_retain', 'market30','market32','market33','market30_retain','market32_retain','market33_retain',
                            'market40','market42','market43','market40_retain','market42_retain','market43_retain','market_oldmask',
                            'market_oldmask_retain','market_trans','market_png'])
        parser.add_argument('--trainset_part',
                            type=str,
                            default='trainval',
                            choices=['trainval', 'train'])

        parser.add_argument('--resize_h_w', type=eval, default=(384, 128))
        # The following options apply only to the training set
        parser.add_argument('--crop_prob', type=float, default=0)
        parser.add_argument('--crop_ratio', type=float, default=1)
        parser.add_argument('--mirror', type=str2bool, default=True)
        parser.add_argument('--batch_size', type=int, default=64)
        parser.add_argument('--triplet_batch_size', type=int, default=192)

        parser.add_argument('--log_to_file', type=str2bool, default=True)
        parser.add_argument('--steps_per_log', type=int, default=20)
        parser.add_argument('--epochs_per_val', type=int, default=1)

        parser.add_argument('--last_conv_stride',
                            type=int,
                            default=1,
                            choices=[1, 2])
        # When the stride is changed to 1, we can compensate for the receptive field
        # using dilated convolution. However, experiments show dilated convolution is useless.
        parser.add_argument('--last_conv_dilation',
                            type=int,
                            default=1,
                            choices=[1, 2])
        parser.add_argument('--num_stripes', type=int, default=6)
        parser.add_argument('--num_cols', type=int, default=1)
        parser.add_argument('--local_conv_out_channels', type=int, default=256)

        parser.add_argument('--only_test', type=str2bool, default=False)
        parser.add_argument('--only_triplet', type=str2bool, default=False)
        parser.add_argument('--only_all', type=str2bool, default=False)
        parser.add_argument('--resume', type=str2bool, default=False)
        parser.add_argument('--exp_dir', type=str, default='')
        parser.add_argument('--model_weight_file', type=str, default='')

        parser.add_argument('--new_params_lr', type=float, default=0.1)
        parser.add_argument('--finetuned_params_lr', type=float, default=0.01)
        parser.add_argument('--triplet_finetuned_params_lr',
                            type=float,
                            default=0.001)
        parser.add_argument('--all_base_finetuned_params_lr',
                            type=float,
                            default=0.001)
        parser.add_argument('--all_new_finetuned_params_lr',
                            type=float,
                            default=0.01)
        parser.add_argument('--staircase_decay_at_epochs',
                            type=eval,
                            default=(41, ))
        parser.add_argument('--all_staircase_decay_at_epochs',
                            type=eval,
                            default=(25, ))
        parser.add_argument('--staircase_decay_multiply_factor',
                            type=float,
                            default=0.1)
        parser.add_argument('--triplet_staircase_decay_multiply_factor',
                            type=float,
                            default=1)
        parser.add_argument('--total_epochs', type=int, default=5)
        parser.add_argument('--triplet_epochs', type=int, default=5)
        parser.add_argument('--pcb_epochs', type=int, default=60)
        parser.add_argument('--margin',
                            type=float,
                            default=0.5,
                            metavar='MARGIN')
        parser.add_argument('--momentum',
                            type=float,
                            default=0.9,
                            metavar='MOMENTUM')
        parser.add_argument('--weight_decay', type=float, default=0.0005)

        args = parser.parse_args()
        # Learning rates
        self.triplet_finetuned_params_lr = args.triplet_finetuned_params_lr
        self.all_base_finetuned_params_lr = args.all_base_finetuned_params_lr
        self.all_new_finetuned_params_lr = args.all_new_finetuned_params_lr

        # gpu ids
        self.sys_device_ids = args.sys_device_ids

        # Number of parts
        self.parts_num = args.num_cols * args.num_stripes

        # margin
        self.margin = args.margin

        # If you want to make your results exactly reproducible, you have
        # to fix a random seed.
        if args.set_seed:
            self.seed = 1
        else:
            self.seed = None

        # The experiment can be run several times and the performances averaged.
        # `run` starts from `1`, not `0`.
        self.run = args.run

        ###########
        # Dataset #
        ###########

        # If you want to make your results exactly reproducible, you have
        # to also set the number of threads to 1 during training.
        if self.seed is not None:
            self.prefetch_threads = 1
        else:
            self.prefetch_threads = 2

        self.pcb_dataset = args.pcb_dataset
        self.triplet_dataset = args.triplet_dataset
        self.trainset_part = args.trainset_part

        # Image Processing

        # Just for training set
        self.crop_prob = args.crop_prob
        self.crop_ratio = args.crop_ratio
        self.resize_h_w = args.resize_h_w

        # Whether to scale by 1/255
        self.scale_im = True
        # self.im_mean = [0.486, 0.459, 0.408,0.5]
        # self.im_std = [0.229, 0.224, 0.225,0.5]
        self.im_mean = None
        self.im_std = None

        self.train_mirror_type = 'random' if args.mirror else None
        self.pcb_train_batch_size = args.batch_size
        self.train_batch_size = args.triplet_batch_size
        self.train_final_batch = False
        # Disable shuffling here so the anchor/positive/negative sets keep
        # their correspondence.
        self.train_shuffle = False
        self.pcb_train_shuffle = True

        self.test_mirror_type = None
        self.test_batch_size = 32
        self.test_final_batch = True
        self.test_shuffle = False
        self.pcb_test_shuffle = True

        triplet_dataset_kwargs = dict(
            name=self.triplet_dataset,
            resize_h_w=self.resize_h_w,
            scale=self.scale_im,
            im_mean=self.im_mean,
            im_std=self.im_std,
            batch_dims='NCHW',
            num_prefetch_threads=self.prefetch_threads)
        pcb_dataset_kwargs = dict(name=self.pcb_dataset,
                                  resize_h_w=self.resize_h_w,
                                  scale=self.scale_im,
                                  im_mean=self.im_mean,
                                  im_std=self.im_std,
                                  batch_dims='NCHW',
                                  num_prefetch_threads=self.prefetch_threads)
        """
    pcb dataset
    """
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.pcb_train_set_kwargs = dict(part=self.trainset_part,
                                         batch_size=self.pcb_train_batch_size,
                                         final_batch=self.train_final_batch,
                                         shuffle=self.pcb_train_shuffle,
                                         crop_prob=self.crop_prob,
                                         crop_ratio=self.crop_ratio,
                                         mirror_type=self.train_mirror_type,
                                         prng=prng)
        self.pcb_train_set_kwargs.update(pcb_dataset_kwargs)
        """
    all dataset
    """
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.all_train_set_kwargs = dict(part=self.trainset_part,
                                         batch_size=self.train_batch_size,
                                         final_batch=self.train_final_batch,
                                         shuffle=self.pcb_train_shuffle,
                                         crop_prob=self.crop_prob,
                                         crop_ratio=self.crop_ratio,
                                         mirror_type=self.train_mirror_type,
                                         prng=prng)
        self.all_train_set_kwargs.update(pcb_dataset_kwargs)
        """
    triplet dataset
    """
        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.train_set_anchor_kwargs = dict(flag='anchor',
                                            part=self.trainset_part,
                                            batch_size=self.train_batch_size,
                                            final_batch=self.train_final_batch,
                                            shuffle=self.train_shuffle,
                                            crop_prob=self.crop_prob,
                                            crop_ratio=self.crop_ratio,
                                            mirror_type=self.train_mirror_type,
                                            prng=prng)
        self.train_set_anchor_kwargs.update(triplet_dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.train_set_positive_kwargs = dict(
            flag='positive',
            part=self.trainset_part,
            batch_size=self.train_batch_size,
            final_batch=self.train_final_batch,
            shuffle=self.train_shuffle,
            crop_prob=self.crop_prob,
            crop_ratio=self.crop_ratio,
            mirror_type=self.train_mirror_type,
            prng=prng)
        self.train_set_positive_kwargs.update(triplet_dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.train_set_negative_kwargs = dict(
            flag='negative',
            part=self.trainset_part,
            batch_size=self.train_batch_size,
            final_batch=self.train_final_batch,
            shuffle=self.train_shuffle,
            crop_prob=self.crop_prob,
            crop_ratio=self.crop_ratio,
            mirror_type=self.train_mirror_type,
            prng=prng)
        self.train_set_negative_kwargs.update(triplet_dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.val_set_kwargs = dict(part='val',
                                   batch_size=self.test_batch_size,
                                   final_batch=self.test_final_batch,
                                   shuffle=self.test_shuffle,
                                   mirror_type=self.test_mirror_type,
                                   prng=prng)
        self.val_set_kwargs.update(triplet_dataset_kwargs)

        prng = np.random
        if self.seed is not None:
            prng = np.random.RandomState(self.seed)
        self.test_set_kwargs = dict(part='test',
                                    batch_size=self.test_batch_size,
                                    final_batch=self.test_final_batch,
                                    shuffle=self.test_shuffle,
                                    mirror_type=self.test_mirror_type,
                                    prng=prng)
        self.test_set_kwargs.update(triplet_dataset_kwargs)

        ###############
        # ReID Model  #
        ###############

        # The last block of ResNet has stride 2. We can set the stride to 1 so that
        # the spatial resolution before global pooling is doubled.
        self.last_conv_stride = args.last_conv_stride
        # When the stride is changed to 1, we can compensate for the receptive field
        # using dilated convolution. However, experiments show dilated convolution is useless.
        self.last_conv_dilation = args.last_conv_dilation
        # Number of stripes (parts)
        self.num_stripes = args.num_stripes
        self.num_cols = args.num_cols
        # Output channels of the 1x1 conv
        self.local_conv_out_channels = args.local_conv_out_channels

        #############
        # Training  #
        #############

        self.momentum = args.momentum
        self.weight_decay = args.weight_decay

        # Initial learning rate
        self.new_params_lr = args.new_params_lr
        self.finetuned_params_lr = args.finetuned_params_lr
        self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
        self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
        self.triplet_staircase_decay_multiply_factor = args.triplet_staircase_decay_multiply_factor
        self.all_staircase_decay_at_epochs = args.all_staircase_decay_at_epochs
        # Number of epochs to train
        self.total_epochs = args.total_epochs
        self.triplet_epochs = args.triplet_epochs
        self.pcb_epochs = args.pcb_epochs

        # How often (in epochs) to test on val set.
        self.epochs_per_val = args.epochs_per_val

        # How often (in batches) to log. If you only need to log the average
        # information for each epoch, set this to a large value, e.g. 1e10.
        self.steps_per_log = args.steps_per_log

        # Only run testing, without training.
        self.only_test = args.only_test

        # Only triplet training, without PCB training; loads the pcb_ckpt file.
        self.only_triplet = args.only_triplet
        self.only_all = args.only_all

        self.resume = args.resume

        #######
        # Log #
        #######

        # If True,
        # 1) stdout and stderr will be redirected to file,
        # 2) training loss etc will be written to tensorboard,
        # 3) checkpoint will be saved
        self.log_to_file = args.log_to_file

        # The root dir of logs.
        if args.exp_dir == '':
            self.exp_dir = osp.join(
                'exp/train',
                '{}'.format(self.pcb_dataset),
                'run{}'.format(self.run),
            )
        else:
            self.exp_dir = args.exp_dir

        self.stdout_file = osp.join(self.exp_dir,
                                    'stdout_{}.txt'.format(time_str()))
        self.stderr_file = osp.join(self.exp_dir,
                                    'stderr_{}.txt'.format(time_str()))

        # Saving model weights and optimizer states, for resuming.
        self.pcb_ckpt_file = osp.join(self.exp_dir, 'pcb_ckpt.pth')
        self.triplet_ckpt_file = osp.join(self.exp_dir, 'triplet_ckpt.pth')
        self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
        # Just for loading a pretrained model; no optimizer state is needed.
        self.model_weight_file = args.model_weight_file
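Each example repeats the same three-line pattern to pick a random source for every split. A small helper would remove the repetition; make_prng is a hypothetical name, not part of the original code:

import numpy as np


def make_prng(seed=None):
    # A seeded RandomState gives a reproducible stream; otherwise fall
    # back to the module-level np.random, exactly as the snippets do.
    return np.random.RandomState(seed) if seed is not None else np.random


prng = make_prng(1)     # reproducible
prng = make_prng(None)  # not reproducible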
Example #6
  def __init__(self):

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0,))
    parser.add_argument('--dataset', type=str, default='market1501',
                        choices=['market1501', 'cuhk03', 'duke'])

    parser.add_argument('--num_queries', type=int, default=16)
    parser.add_argument('--rank_list_size', type=int, default=10)

    parser.add_argument('--resize_h_w', type=eval, default=(384, 128))
    parser.add_argument('--last_conv_stride', type=int, default=1,
                        choices=[1, 2])
    parser.add_argument('--num_stripes', type=int, default=6)
    parser.add_argument('--local_conv_out_channels', type=int, default=256)

    parser.add_argument('--log_to_file', type=str2bool, default=True)
    parser.add_argument('--exp_dir', type=str, default='')
    parser.add_argument('--ckpt_file', type=str, default='')
    parser.add_argument('--model_weight_file', type=str, default='')

    args = parser.parse_args()

    # gpu ids
    self.sys_device_ids = args.sys_device_ids

    self.num_queries = args.num_queries
    self.rank_list_size = args.rank_list_size

    ###########
    # Dataset #
    ###########

    self.dataset = args.dataset
    self.prefetch_threads = 2

    # Image Processing

    self.resize_h_w = args.resize_h_w

    # Whether to scale by 1/255
    self.scale_im = True
    self.im_mean = [0.486, 0.459, 0.408]
    self.im_std = [0.229, 0.224, 0.225]

    self.test_mirror_type = None
    self.test_batch_size = 32
    self.test_final_batch = True
    self.test_shuffle = False

    dataset_kwargs = dict(
      name=self.dataset,
      resize_h_w=self.resize_h_w,
      scale=self.scale_im,
      im_mean=self.im_mean,
      im_std=self.im_std,
      batch_dims='NCHW',
      num_prefetch_threads=self.prefetch_threads)

    prng = np.random
    self.test_set_kwargs = dict(
      part='test',
      batch_size=self.test_batch_size,
      final_batch=self.test_final_batch,
      shuffle=self.test_shuffle,
      mirror_type=self.test_mirror_type,
      prng=prng)
    self.test_set_kwargs.update(dataset_kwargs)

    ###############
    # ReID Model  #
    ###############

    # The last block of ResNet has stride 2. We can set the stride to 1 so that
    # the spatial resolution before global pooling is doubled.
    self.last_conv_stride = args.last_conv_stride
    # Number of stripes (parts)
    self.num_stripes = args.num_stripes
    # Output channels of the 1x1 conv
    self.local_conv_out_channels = args.local_conv_out_channels

    #######
    # Log #
    #######

    # If True, stdout and stderr will be redirected to file
    self.log_to_file = args.log_to_file

    # The root dir of logs.
    if args.exp_dir == '':
      self.exp_dir = osp.join(
        'exp/visualize_rank_list',
        '{}'.format(self.dataset),
      )
    else:
      self.exp_dir = args.exp_dir

    self.stdout_file = osp.join(
      self.exp_dir, 'stdout_{}.txt'.format(time_str()))
    self.stderr_file = osp.join(
      self.exp_dir, 'stderr_{}.txt'.format(time_str()))

    # Model weights and optimizer states, for resuming.
    self.ckpt_file = args.ckpt_file
    # Just for loading a pretrained model; no optimizer state is needed.
    self.model_weight_file = args.model_weight_file