def parse_args():
    """Build and parse CLI options for prototypical-loss training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using prototypical loss',
        default_logfile='train_prototype.log',
        default_model_prefix='prototype_model')

    # Episode composition: nc classes, each with ns support + nq query samples.
    parser.add_argument('--batch-size', type=int, default=32,
                        help='Number of samples in a batch per device. Default is 32')
    parser.add_argument('--nc', type=int, default=12,
                        help='Number of classes in each episode. Default is 12')
    parser.add_argument('--nq', type=int, default=5,
                        help='Number of query examples in each episode. Default is 5.')
    parser.add_argument('--ns', type=int, default=5,
                        help='Number of support examples in each episode. Default is 5.')

    # Optimization schedule.
    parser.add_argument('--epochs', type=int, default=30,
                        help='number of training epochs. default is 30.')
    parser.add_argument('--iteration-per-epoch', type=int, default=100,
                        help='Number of iterations per epoch. Default is 100')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--steps', type=str, default='12,14,16,18',
                        help='epochs to update learning rate. default is 12,14,16,18.')

    args = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if args.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(args.logfile, args.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return args
def parse_args():
    """Build and parse CLI options for structured cluster-loss training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    # FIX: description previously misspelled 'clusture'.
    parser = TrainingParser(
        description='Distance metric learning using structured cluster loss',
        default_logfile='train_clusterloss.log',
        default_model_prefix='clusterloss_model')

    parser.add_argument('--batch-size', type=int, default=120,
                        help='Number of samples in a batch. Default is 120')
    parser.add_argument(
        '--batch-k', type=int, default=5,
        help='Number of images per class in a batch. '
             'Used only if iteration-per-epoch > 0. Default is 5.')

    # Optimization schedule.
    parser.add_argument('--epochs', type=int, default=25,
                        help='number of training epochs. default is 25.')
    # FIX: help text previously said 'learning rate. default is 0.01.' although
    # this flag is the optimizer epsilon with default 1e-7.
    parser.add_argument('--epsilon', type=float, default=1e-7,
                        help='Optimizer epsilon. Default is 1e-07.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--steps', type=str, default='12,14,16,18',
                        help='epochs to update learning rate. default is 12,14,16,18.')
    parser.add_argument('--iteration-per-epoch', type=int, default=0,
                        help='Number of iterations per epoch for iteration-based training')
    parser.add_argument('--decrease-cnn-lr', action="store_true",
                        help='Use a lower LR on the feature extractor')

    opt = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for norm-proxy training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using normproxies',
        default_logfile='train_normproxy.log',
        default_model_prefix='normproxy_model')

    parser.add_argument('--batch-size', type=int, default=75,
                        help='Number of samples in a batch. Default is 75')
    # FIX: help text previously said 'default is 60' although the default is 30.
    parser.add_argument('--epochs', type=int, default=30,
                        help='number of training epochs. default is 30.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    # FIX: help text previously said 'default is 0.5' although the default is 0.1.
    parser.add_argument('--factor', type=float, default=0.1,
                        help='learning rate schedule factor. default is 0.1.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    # FIX: help text previously said 'Default is -1' although the default is 15.
    parser.add_argument('--steps', type=str, default='15',
                        help='Epochs to update learning rate. Negative number represents periodic decrease.'
                             'Zero means no steps. Default is 15')
    parser.add_argument('--binarize', action="store_true",
                        help='Thresholds the embedding into a binary vector')
    parser.add_argument('--epsilon', type=float, default=1e-2,
                        help='Optimizer epsilon. default is 0.01.')
    # FIX: help text previously said 'Default is 0' although the default is 0.1.
    parser.add_argument('--label-smooth', type=float, default=0.1,
                        help='Label smoothing. Default is 0.1')
    parser.add_argument('--temperature', type=float, default=0.05,
                        help='Sigma temperature constant. Default is 0.05')
    # FIX: this help string contained a raw newline, breaking the string literal.
    parser.add_argument('--batch-k', type=int, default=0,
                        help='Number of images per class for episodic sampling. '
                             '0 will turn it off.')
    parser.add_argument('--start-epoch', type=int, default=1,
                        help='Epoch to start at, >1 means loading parameters')
    parser.add_argument('--no-fc', action="store_true",
                        help='Skips the fully-connected layer in the model.')
    parser.add_argument('--static-proxies', action="store_true",
                        help='Proxies will not be learned.')
    parser.add_argument('--no-dropout', dest='dropout', action="store_false",
                        help='Do not add dropout layer to the model.')
    parser.add_argument('--similarity', type=str,
                        choices=['euclidean', 'cosine'], default='euclidean')

    # Override defaults inherited from TrainingParser for this experiment.
    parser.set_defaults(
        embed_dim=2048,
        lr=0.001,
        wd=1e-4,
    )

    opt = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for semihard-mined triplet-loss training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using triplet loss with semihard mining',
        default_logfile='train_triplet_semihard.log',
        default_model_prefix='triplet_semihard_model')

    parser.add_argument('--batch-size', type=int, default=128,
                        help='Number of samples in a batch per compute unit. Default is 128.')

    # Optimization schedule.
    parser.add_argument('--epochs', type=int, default=60,
                        help='number of training epochs. default is 60.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--steps', type=str, default='20,30,40',
                        help='epochs to update learning rate. default is 20,30,40.')

    args = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if args.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(args.logfile, args.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return args
def parse_args():
    """Build and parse CLI options for DREML (randomized ensemble) training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Deep Randomized Ensembles for Metric Learning',
        default_logfile='train_dreml.log',
        default_model_prefix='dreml_model')

    parser.add_argument('--batch-size', type=int, default=128,
                        help='Number of samples in a batch. Default is 128')
    # FIX: help text previously said 'default is 20' although the default is 12.
    parser.add_argument('--epochs', type=int, default=12,
                        help='number of training epochs. default is 12.')
    # FIX: help text previously said 'default is 0.0001' although the default is 0.01.
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate. default is 0.01.')
    parser.add_argument('--factor', type=float, default=0.1,
                        help='learning rate schedule factor. default is 0.1.')
    parser.add_argument('--wd', type=float, default=5e-4,
                        help='weight decay rate. default is 5e-4.')
    parser.add_argument('--steps', type=str, default='-4',
                        help='Epochs to update learning rate. Negative number represents periodic decrease.'
                             'Zero means no steps. Default is -4')
    parser.add_argument('--loss', type=str, default='nca',
                        help='Which loss to use: [triplet, nca, xentropy]')
    parser.add_argument('--epsilon', type=float, default=1e-2,
                        help='Optimizer epsilon. default is 0.01.')
    parser.add_argument('--label-smooth', type=float, default=0,
                        help='Label smoothing. Default is 0')
    parser.add_argument('--embedding-multiplier', type=float, default=3,
                        help='Multiplies normalized embeddings and proxies. Default is 3')

    # Ensemble geometry: L learners, each trained on D meta-classes.
    parser.add_argument('-L', '--number-of-ensembles', dest='L', type=int, default=48,
                        help='Number of ensembles.')
    parser.add_argument('-D', '--meta-classes', dest='D', type=int, default=12,
                        help='Number of meta-classes.')
    parser.add_argument('--static-proxies', action="store_true",
                        help='Do not learn proxies, but keep them fixed.')
    parser.add_argument('--data-shape', type=int, default=224,
                        help='Input data size')

    opt = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for proxy-based training.

    Also wires up file + console logging unless ``--logfile none`` is given;
    the chosen loss name is folded into the log filename.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using proxies',
        default_logfile='train_proxy.log',
        default_model_prefix='proxy_model')

    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU). default is 32.')
    parser.add_argument(
        '--batch-k', type=int, default=5,
        help='Number of images per class in a batch. '
             'Used only if iteration-per-epoch > 0. Default is 5.')
    parser.add_argument('--loss', type=str, default='triplet',
                        help='Which loss to use: [nca, triplet, proxymargin, xentropy]')
    parser.add_argument('--epsilon', type=float, default=1e-2,
                        help='Optimizer epsilon. default is 0.01.')

    # Per-component learning rates; --lr, when given, overrides all of them.
    parser.add_argument('--lr', default=None, type=float,
                        help='Learning rate for the whole model. Overwrites specific learning rates.')
    parser.add_argument('--lr-embedding', default=1e-5, type=float,
                        help='Learning rate for embedding.')
    parser.add_argument('--lr-inception', default=1e-3, type=float,
                        help='Learning rate for Inception, excluding embedding layer.')
    parser.add_argument('--lr-proxynca', default=1e-3, type=float,
                        help='Learning rate for proxies of Proxy NCA.')

    parser.add_argument('--wd', type=float, default=5e-4,
                        help='weight decay rate. default is 5e-4.')
    parser.add_argument('--factor', type=float, default=1e-1,
                        help='learning rate schedule factor. default is 1e-1.')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of training epochs. default is 20.')
    parser.add_argument('--steps', type=str, default='3,10,16',
                        help='Epochs to update learning rate. Negative number represents periodic decrease.'
                             'Zero means no steps. Default is 3,10,16')
    parser.add_argument('--iteration-per-epoch', type=int, default=0,
                        help='Number of iterations per epoch for iteration-based training')
    # FIX: this help string contained a raw newline, breaking the string literal.
    parser.add_argument('--label-smooth', type=float, default=0,
                        help='Label smoothing. Default is 0')
    parser.add_argument('--embedding-multiplier', type=float, default=3,
                        help='Multiplies normalized embeddings and proxies. Default is 3')
    parser.add_argument('--temperature', type=float, default=1.0,
                        help='Temperature scaling for NCA and XEntropy losses. Default is 1')

    opt = parser.parse_args()

    # 'none' (any case) disables file logging entirely. Note the extra
    # opt.loss component in the log filename, unlike the other trainers.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.loss, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for angular-loss training.

    Also wires up file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using angular loss',
        default_logfile='train_angular.log',
        default_model_prefix='angular_model')

    parser.add_argument('--batch-size', type=int, default=128,
                        help='Number of samples in a batch, this equals to 2N. Default is 128')
    # FIX: help text previously said 'default is 25' although the default is 30.
    parser.add_argument('--epochs', type=int, default=30,
                        help='number of training epochs. default is 30.')
    parser.add_argument('--epoch-length', type=int, default=200,
                        help='Number of iterations per epoch. Default is 200.')
    # FIX: help text previously said 'default is 0.0001' although the default is 1e-5.
    parser.add_argument('--lr', type=float, default=1e-5,
                        help='learning rate. default is 1e-05.')
    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--alpha', type=float, default=45,
                        help='Alpha constant in degrees. Default is 45.')
    # FIX: typo 'turns of' -> 'turns off'.
    parser.add_argument(
        '--angular-lambda', type=float, default=0.0,
        help='Angular loss factor used together with NPair loss. '
             '0 turns off NPair loss. Default is 0.')
    parser.add_argument(
        '--l2reg-weight', type=float, default=0.0005,
        help='Weight of L2 regularization for feature vectors. '
             'Default is 0.25 * 0.002. Used only in NL&AL.')
    # FIX: help text previously said 'default is 12,14,16,18' although the
    # default is 10,20.
    parser.add_argument('--steps', type=str, default='10,20',
                        help='epochs to update learning rate. default is 10,20.')
    parser.add_argument('--same-image-sampling', type=float, default=0.1,
                        help='Chance to sample both items from the same image. Default is 0.1')
    parser.add_argument('--symmetric-loss', action="store_true",
                        help='Use symmetric loss. Used only in NL&AL.')
    parser.add_argument('--decrease-cnn-lr', action="store_true",
                        help='Use a lower LR on the feature extractor')

    opt = parser.parse_args()

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for ranked-list-loss training.

    Validates that the batch size is divisible by batch-k, and wires up
    file + console logging unless ``--logfile none`` is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning using ranked list loss with semihard mining',
        default_logfile='train_rankedlist.log',
        default_model_prefix='rankedlist_model')

    parser.add_argument('--batch-size', type=int, default=180,
                        help='Number of samples in a batch. Default is 180')
    parser.add_argument('--epochs', type=int, default=60,
                        help='number of training epochs. default is 60.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--steps', type=str, default='20,30,40',
                        help='epochs to update learning rate. default is 20,30,40.')
    parser.add_argument('--batch-k', type=int, default=3,
                        help='Number of images per class')

    # Ranked-list loss hyperparameters.
    parser.add_argument('--alpha', type=float, default=1.2,
                        help='Margin for negatives')
    parser.add_argument('--margin', type=float, default=0.4,
                        help='Margin for positives')
    parser.add_argument('--temperature', type=float, default=10,
                        help='Temperature for negatives')
    parser.add_argument('--iteration-per-epoch', type=int, default=200,
                        help='Number of iterations per epoch for iteration-based training')
    parser.add_argument('--bottleneck-layers', type=str, default='')

    opt = parser.parse_args()

    # FIX: was an `assert`, which is silently stripped under `python -O`.
    # parser.error prints usage and exits with status 2 instead.
    if opt.batch_size % opt.batch_k != 0:
        parser.error('Batch size must be divisible by batch-k')

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt
def parse_args():
    """Build and parse CLI options for margin-loss training.

    Enforces the documented constraint that the batch size is divisible by
    batch-k, and wires up file + console logging unless ``--logfile none``
    is given.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = TrainingParser(
        description='Distance metric learning with marginloss and distance-weighted sampling.',
        default_logfile='train_margin.log',
        default_model_prefix='margin_loss_model')

    parser.add_argument(
        '--batch-size', type=int, default=125,
        help='Number of samples in a batch per compute unit. Default is 125.'
             'Must be divisible with batch-k.')
    parser.add_argument('--batch-k', type=int, default=5,
                        help='number of images per class in a batch. default is 5.')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of training epochs. default is 20.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='learning rate. default is 0.0001.')
    parser.add_argument(
        '--lr-beta', type=float, default=0.1,
        help='learning rate for the beta in margin based loss. default is 0.1.')

    # Margin-loss hyperparameters.
    parser.add_argument('--margin', type=float, default=0.2,
                        help='margin for the margin based loss. default is 0.2.')
    parser.add_argument('--beta', type=float, default=1.2,
                        help='initial value for beta. default is 1.2.')
    parser.add_argument('--nu', type=float, default=0.0,
                        help='regularization parameter for beta. default is 0.0.')

    parser.add_argument('--factor', type=float, default=0.5,
                        help='learning rate schedule factor. default is 0.5.')
    parser.add_argument('--steps', type=str, default='12,14,16,18',
                        help='epochs to update learning rate. default is 12,14,16,18.')
    parser.add_argument('--wd', type=float, default=0.00001,
                        help='weight decay rate. default is 0.00001.')
    parser.add_argument('--iteration-per-epoch', type=int, default=200,
                        help='Number of iteration per epoch. default=200.')

    opt = parser.parse_args()

    # FIX: the --batch-size help promises divisibility by batch-k but nothing
    # enforced it; validate here (same pattern as the ranked-list trainer).
    if opt.batch_size % opt.batch_k != 0:
        parser.error('Batch size must be divisible by batch-k')

    # 'none' (any case) disables file logging entirely.
    if opt.logfile.lower() != 'none':
        logging.basicConfig(
            filename=append_postfix(opt.logfile, opt.log_postfix),
            level=logging.INFO)
        # Mirror the log output to the console as well.
        logging.getLogger().addHandler(logging.StreamHandler())
    return opt