# Command-line arguments. `parser` (an argparse.ArgumentParser), the `os` and
# `utilities` modules, and the --workspace / --balance_type arguments referenced
# below are assumed to be defined earlier in the full script.
parser.add_argument('--model_type', type=str, choices=[
    'decision_level_max_pooling', 'decision_level_average_pooling',
    'decision_level_single_attention', 'decision_level_multi_attention',
    'feature_level_single_attention'])
parser.add_argument('--learning_rate', type=float, default=1e-3)

# Sub-commands
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_get_avg_stats = subparsers.add_parser('get_avg_stats')

args = parser.parse_args()
args.filename = utilities.get_filename(__file__)

# Logs
sub_dir = os.path.join(args.filename,
                       'balance_type={}'.format(args.balance_type),
                       'model_type={}'.format(args.model_type))
logs_dir = os.path.join(args.workspace, 'logs', sub_dir)
utilities.create_folder(logs_dir)

logging = utilities.create_logging(logs_dir, filemode='w')
logging.info(os.path.abspath(__file__))
logging.info(args)

totest = 0
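# Example invocation (a sketch, not from the source: the script name, workspace
# path and balance_type value are placeholders; with argparse sub-parsers, the
# top-level options must come before the sub-command name):
#
#   python <script>.py --workspace=<workspace_dir> \
#       --balance_type=<balance_type> \
#       --model_type=decision_level_single_attention \
#       --learning_rate=1e-3 \
#       train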
# Append from main.py
# parser_train.add_argument('--cuda', action='store_true', default=True)
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--workspace', type=str, required=True)
parser_train.add_argument('--sample_rate', type=int, default=32000)
parser_train.add_argument('--window_size', type=int, default=1024)
parser_train.add_argument('--hop_size', type=int, default=320)
parser_train.add_argument('--mel_bins', type=int, default=64)
parser_train.add_argument('--fmin', type=int, default=50)
parser_train.add_argument('--fmax', type=int, default=14000)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])
parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup'])
parser_train.add_argument('--batch_size', type=int, default=32)
parser_train.add_argument('--learning_rate', type=float, default=1e-3)
parser_train.add_argument('--resume_iteration', type=int, default=0)
parser_train.add_argument('--early_stop', type=int, default=20000)  # early_stop * batch_size / num_trn_samples = epochs
parser_train.add_argument('--pretrained_checkpoint_path', type=str)
# parser_train.add_argument('--freeze_base', action='store_true', default=False)
parser_train.add_argument('--freeze_base_num', type=int, default=0)

# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)

if args.mode == 'train':
    train(args)
else:
    raise Exception('Error argument!')
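# Example invocation of the 'train' sub-command (a sketch; the workspace path,
# model name and checkpoint path are placeholders, the remaining values are the
# defaults defined above):
#
#   python main.py train \
#       --workspace=<workspace_dir> \
#       --model_type=<model_name> \
#       --pretrained_checkpoint_path=<pretrained_checkpoint.pth> \
#       --freeze_base_num=0 \
#       --loss_type=clip_bce \
#       --balanced=balanced \
#       --augmentation=mixup \
#       --batch_size=32 \
#       --learning_rate=1e-3 \
#       --resume_iteration=0 \
#       --early_stop=20000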