help="train and val sets together for training, " "val set alone for validation") parser.add_argument('--batch-id', type=str, default='2018-03-26-16:21:21') parser.add_argument('--num-instances', type=int, default=4, help="each minibatch consist of " "(batch_size // num_instances) identities, and " "each identity has num_instances instances, " "default: 4") # model parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names()) parser.add_argument('--features', type=int, default=128) parser.add_argument('--dropout', type=float, default=0) # loss parser.add_argument('--margin', type=float, default=0.5, help="margin of the triplet loss, default: 0.5") # optimizer parser.add_argument('--lr', type=float, default=0.0002, help="learning rate of all parameters") parser.add_argument('--weight-decay', type=float, default=5e-4) # training configs parser.add_argument('--resume', type=str, default='', metavar='PATH')
def __init__(self):
    """Build the FD-GAN training option parser and immediately parse sys.argv.

    Side effects: stores the parser in ``self.parser``, the parsed
    namespace in ``self.opt``, and prints the options via
    ``self.show_opt()``.
    """
    self.parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    self.parser.add_argument('--stage', type=int, default=1,
                             help='training stage [1|2]')
    # datasets
    self.parser.add_argument('-d', '--dataset', type=str, default='market1501',
                             choices=datasets.names())
    self.parser.add_argument('-t', '--target', type=str, default='dukemtmc',
                             choices=datasets.names())
    self.parser.add_argument('-s', '--source', type=str, default='market1501',
                             choices=datasets.names())
    # paths
    self.parser.add_argument(
        '--dataroot', type=str, default='./datasets/',
        help='root path to datasets (should have subfolders market1501, dukemtmc, cuhk03, etc)')
    self.parser.add_argument('--checkpoints', type=str, default='./checkpoints/',
                             help='root path to save models')
    self.parser.add_argument('--name', type=str, default='FD-GAN',
                             help='directory to save models')
    # Pretrained-weight paths used to initialise the sub-networks in stage 2.
    # BUG FIX: these descriptive sentences were originally passed as the
    # ``default=`` values (so e.g. opt.netE_pretrain defaulted to the literal
    # string 'pretrained model path for net_E in stage 2'); they are now the
    # ``help`` texts, with an empty-string default meaning "not provided".
    self.parser.add_argument('--netE-pretrain', type=str, default='',
                             help='pretrained model path for net_E in stage 2')
    self.parser.add_argument('--netG-pretrain', type=str, default='',
                             help='pretrained model path for net_G in stage 2')
    self.parser.add_argument('--tar-netG-pretrain', type=str, default='',
                             help='pretrained model path for net_G in stage 2')
    self.parser.add_argument('--netDp-pretrain', type=str, default='',
                             help='pretrained model path for net_Dp in stage 2')
    self.parser.add_argument('--tar-netDi-pretrain', type=str, default='',
                             help='pretrained model path for net_Di in stage 2')
    self.parser.add_argument('--netDi-pretrain', type=str, default='',
                             help='pretrained model path for net_Di in stage 2')
    # model structures
    self.parser.add_argument('--arch', type=str, default='resnet50',
                             choices=models.names())
    self.parser.add_argument('--norm', type=str, default='batch',
                             help='instance normalization or batch normalization')
    self.parser.add_argument('--drop', type=float, default=0.2,
                             help='dropout for the netG')
    self.parser.add_argument('--connect-layers', type=int, default=0,
                             help='skip connections num for netG')
    self.parser.add_argument('--fuse-mode', type=str, default='cat',
                             help='method to fuse reid feature and pose feature [cat|add]')
    self.parser.add_argument('--pose-feature-size', type=int, default=128,
                             help='length of feature vector for pose')
    self.parser.add_argument('--noise-feature-size', type=int, default=256,
                             help='length of feature vector for noise')
    self.parser.add_argument('--pose-aug', type=str, default='no',
                             help='posemap augmentation [no|erase|gauss]')
    # dataloader setting
    self.parser.add_argument('-b', '--batch-size', type=int, default=16,
                             help='input batch size')
    self.parser.add_argument('-j', '--workers', default=4, type=int,
                             help='num threads for loading data')
    self.parser.add_argument('--width', type=int, default=128,
                             help='input image width')
    self.parser.add_argument('--height', type=int, default=256,
                             help='input image height')
    # optimizer setting
    self.parser.add_argument('--niter', type=int, default=50,
                             help='# of iter at starting learning rate')
    self.parser.add_argument('--niter-decay', type=int, default=50,
                             help='# of iter to linearly decay learning rate to zero')
    self.parser.add_argument('--lr', type=float, default=0.001,
                             help='initial learning rate')
    self.parser.add_argument('--save-step', type=int, default=2,
                             help='frequency of saving checkpoints at the end of epochs')
    self.parser.add_argument('--eval-step', type=int, default=10,
                             help='frequency of evaluate checkpoints at the end of epochs')
    # visualization setting (visdom web display + html dumps)
    self.parser.add_argument('--display-port', type=int, default=6006,
                             help='visdom port of the web display')
    self.parser.add_argument('--display-id', type=int, default=1,
                             help='window id of the web display, set 0 for non-usage of visdom')
    self.parser.add_argument('--display-winsize', type=int, default=256,
                             help='display window size')
    self.parser.add_argument('--display-freq', type=int, default=10,
                             help='frequency of showing training results on screen')
    self.parser.add_argument(
        '--display-single-pane-ncols', type=int, default=0,
        help='if positive, display all images in a single visdom web panel '
             'with certain number of images per row.')
    self.parser.add_argument('--update-html-freq', type=int, default=100,
                             help='frequency of saving training results to html')
    self.parser.add_argument(
        '--no_html', action='store_true',
        help='do not save intermediate training results to [opt.checkpoints]/name/web/')
    self.parser.add_argument('--print-freq', type=int, default=10,
                             help='frequency of showing training results on console')
    # training schedule (loss weights)
    self.parser.add_argument('--lambda-recon', type=float, default=1.0,
                             help='loss weight of loss_r')
    self.parser.add_argument('--lambda-veri', type=float, default=1.0,
                             help='loss weight of loss_v')
    self.parser.add_argument('--lambda-sp', type=float, default=1.0,
                             help='loss weight of loss_sp')
    self.parser.add_argument('--lambda-mmd', type=float, default=1.0,
                             help='loss weight of loss_mmd')
    self.parser.add_argument('--lambda-tri', type=float, default=10.0,
                             help='loss weight of loss_tri')
    self.parser.add_argument('--smooth-label', action='store_true',
                             help='smooth label or not for GANloss')
    self.parser.add_argument('--tri-margin', type=float, default=0.5,
                             help='margin for Tripletloss')
    self.opt = self.parser.parse_args()
    self.show_opt()
parser.add_argument('--ev', type=int, default=1) # parser.add_argument('--clock', type=bool, default=True) # 是否记时 # parser.add_argument('--is_baseline', type=bool, default=False) # 默认不是baseline # the key parameters is following parser.add_argument('--total_step', type=int, default=5) # 默认总的五次迭代. # parser.add_argument('--train_tagper_step', type=float, default=3) # 用于训练 tagper的 step 数 parser.add_argument('--epoch', type=int, default=70) parser.add_argument('--step_size', type=int, default=55) parser.add_argument('-b', '--batch_size', type=int, default=16) '''new''' parser.add_argument('--p', type=int, default=1) # 采样曲线的指数 parser.add_argument('--baba', type=float, default=2) # tagper的训练数量reid的baba倍数,感觉2应该是上线了. # 下面是暂时不知道用来做什么的参数 parser.add_argument('-a', '--arch', type=str, default='avg_pool', choices=models.names()) # eug model_name parser.add_argument('-i', '--iter-step', type=int, default=5) parser.add_argument('-g', '--gamma', type=float, default=0.3) parser.add_argument('-l', '--l', type=float) parser.add_argument('--continuous', action="store_true") main(parser.parse_args()) ''' python3.6 atm06.py --total_step 5 --exp_order 6 python3.6 atm06.py --total_step 6 --exp_order 7 --p 1 --baba 1.5 python3.6 atm06.py --total_step 6 --exp_order 8 --p 1 --baba 2 --max_frames 400 '''
default='.', help='path to reid dataset') parser.add_argument('-s', '--source', type=str, default='dukemtmc', choices=datasets.names()) parser.add_argument('-t', '--target', type=str, default='market1501', choices=datasets.names()) parser.add_argument('-m', '--mte', type=str, default='personx', choices=datasets.names()) parser.add_argument('--batch_size', type=int, default=64, required=True, help='number of examples/minibatch') parser.add_argument('--num_batches', type=int, required=False, help='number of batches (default entire dataset)') parser.add_argument('--resume', type=str, default='', metavar='PATH') parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names()) parser.add_argument('--split', type=int, default=0) parser.add_argument('--height', type=int, default=256, help="input height, default: 256 for resnet*, " "144 for inception") parser.add_argument('--width', type=int, default=128, help="input width, default: 128 for resnet*, " "56 for inception") parser.add_argument('--num-instances', type=int, default=8, help="each minibatch consist of " "(batch_size // num_instances) identities, and " "each identity has num_instances instances, " "default: 4") parser.add_argument('--combine-trainval', action='store_true', help="train and val sets together for training, "
BuMain.evaluate(dataset_all.query, dataset_all.gallery) # get new train data for the next iteration print('----------------------------------------bottom-up clustering------------------------------------------------') cluster_id_labels, new_train_data = BuMain.get_new_train_data(cluster_id_labels, nums_to_merge, size_penalty=args.size_penalty) print('\n\n') if __name__ == '__main__': parser = argparse.ArgumentParser(description='bottom-up clustering') parser.add_argument('-d', '--dataset', type=str, default='custom', choices=datasets.names()) parser.add_argument('-b', '--batch-size', type=int, default=16) parser.add_argument('-f', '--fea', type=int, default=2048) parser.add_argument('-a', '--arch', type=str, default='avg_pool',choices=models.names()) working_dir = os.path.dirname(os.path.abspath(__file__)) parser.add_argument('--data_dir', type=str, metavar='PATH', default=os.path.join(working_dir,'data')) parser.add_argument('--logs_dir', type=str, metavar='PATH', default=os.path.join(working_dir,'logs')) parser.add_argument('--max_frames', type=int, default=900) parser.add_argument('--loss', type=str, default='ExLoss') parser.add_argument('-m', '--momentum', type=float, default=0.5) parser.add_argument('-s', '--step_size', type=int, default=55) parser.add_argument('--size_penalty',type=float, default=0.005) parser.add_argument('-mp', '--merge_percent',type=float, default=0.05) main(parser.parse_args())
help="input height, default: 256 for resnet*, " "144 for inception") parser.add_argument('--width', type=int, help="input width, default: 128 for resnet*, " "56 for inception") parser.add_argument('--combine-trainval', action='store_true', help="train and val sets together for training, " "val set alone for validation") parser.add_argument('--num-instances', type=int, default=4, help="each minibatch consist of " "(batch_size // num_instances) identities, and " "each identity has num_instances instances, " "default: 4") # model parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names()) parser.add_argument('--features', type=int, default=128) parser.add_argument('--dropout', type=float, default=0) # loss parser.add_argument('--margin', type=float, default=0.5, help="margin of the triplet loss, default: 0.5") # optimizer parser.add_argument('--lr', type=float, default=0.0001, help="learning rate of all parameters") parser.add_argument('--weight-decay', type=float, default=5e-4) # training configs parser.add_argument('--resume', type=str, default='', metavar='PATH') parser.add_argument('--resume_from_trip', type=str, default='', metavar='PATH') parser.add_argument('--evaluate', action='store_true', help="evaluation only") parser.add_argument('--only_train_metric', action='store_true',