def add_additional_params_to_args(args):
    """Attach derived fields to the parsed argument namespace.

    Picks ``src_dataset`` when the namespace has one, falling back to
    ``tgt_dataset``, and uses it to fill ``args.n_class``. Also records
    the current host name in ``args.machine``.

    Returns the same (mutated) namespace for call-chaining.
    """
    if "src_dataset" in vars(args):
        dataset = args.src_dataset
    else:
        dataset = args.tgt_dataset
    args.n_class = get_n_class(dataset)
    # os.uname()[1] is the node (host) name — POSIX-only.
    args.machine = os.uname()[1]
    return args
def add_additional_params_to_args(args):
    """Populate the argument namespace with derived fields.

    Sets ``args.n_class`` from whichever dataset option is present
    (``src_dataset`` preferred, else ``tgt_dataset``), records the host
    name in ``args.machine``, and fills in default train/test image
    shapes when the corresponding options exist but were left as None.

    Returns the same (mutated) namespace.
    """
    attrs = vars(args)  # live view of the namespace's attributes
    dataset = args.src_dataset if "src_dataset" in attrs else args.tgt_dataset
    args.n_class = get_n_class(dataset)
    # os.uname()[1] is the node (host) name — POSIX-only.
    args.machine = os.uname()[1]

    # Default the train image shape only when both options exist and the
    # user did not supply an explicit shape.
    if "src_dataset" in attrs and "train_img_shape" in attrs:
        if args.train_img_shape is None:
            args.train_img_shape = get_img_shape(args.src_dataset, is_train=True)
            print("args.train_img_shape is set to %s" % args.train_img_shape)
    # Same defaulting for the test image shape, keyed on the target dataset.
    if "tgt_dataset" in attrs and "test_img_shape" in attrs:
        if args.test_img_shape is None:
            args.test_img_shape = get_img_shape(args.tgt_dataset, is_train=False)
            print("args.test_img_shape is set to %s" % args.test_img_shape)
    return args
# --- CLI flags for training behavior ---
parser.add_argument("--add_bg_loss", action="store_true",
                    help='whether you add background loss or not')
parser.add_argument("--adjust_lr", action="store_true",
                    help='whether you change lr')
parser.add_argument("--max_iter", type=int, default=5000)
parser.add_argument(
    "--fix_bn", action="store_true",
    help='whether you fix the paramters of batch normalization layer')

args = parser.parse_args()

# Validate the source/target dataset combination before any heavy setup.
check_src_tgt_ok(args.src_dataset, args.tgt_dataset)

# NOTE(review): here get_n_class receives both datasets, while
# add_additional_params_to_args elsewhere in this file passes only one —
# confirm the helper's signature is the two-argument form.
args.n_class = get_n_class(args.src_dataset, args.tgt_dataset)

# Per-class cross-entropy weights; default is a uniform weight of 1.
weight = torch.ones(args.n_class)
if args.loss_weights_file:
    import pandas as pd
    # CSV is assumed to have 'class_id' and 'weight' columns; sorting by
    # class_id aligns the rows with the class-index order of `weight`.
    loss_df = pd.read_csv(args.loss_weights_file)
    loss_df.sort_values("class_id", inplace=True)
    weight *= torch.FloatTensor(loss_df.weight.values)
# The last class index is treated as background; zero weight disables its loss.
if not args.add_bg_loss:
    weight[args.n_class - 1] = 0  # Ignore background loss
# print ("loss weight %s" % weight)

# Network selection branches continue beyond this chunk.
if args.net == "fcn":