# parse user inputs
parser = argparse.ArgumentParser()
# one of 'TMAX', 'TMIN', 'TMEAN', 'PCT'
parser.add_argument('v', help='Downscaling variable name')
# one of 'annual', 'summer', 'winter', 'djf', 'mam', 'jja', 'son'
parser.add_argument('s', help='Training seasons')
# one of 1, 2, ..., 5; 3 is expected, the rest are trial-and-error options
parser.add_argument('c1', help='Number of input channels (1<c<5)')
# 1 is expected, other numbers are reserved for UNET-AE
parser.add_argument('c2', help='Number of output channels (1<c<3)')
args = vars(parser.parse_args())

# parser handling
VAR, seasons, input_flag, output_flag = tu.parser_handler(args)
N_input = int(np.sum(input_flag))
N_output = int(np.sum(output_flag))
if N_output > 1:
    raise ValueError('Nest-Net accepts only one target')

# ---------------------------------------------------------- #
# number of filters based on the downscaling variable
if VAR == 'PCT':
    print('PCT hidden layer setup')
    N = [64, 96, 128, 160]
else:
    print('T2 hidden layer setup')
    N = [56, 112, 224, 448]
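# ---------------------------------------------------------- #
# Illustration only (assumption, not part of the original script): the four
# positionals parse in the order v, s, c1, c2, so a TMAX / annual run with
# 3 input channels and 1 target (hypothetical example values) parses as
#
#     >>> demo = argparse.ArgumentParser()
#     >>> for name in ('v', 's', 'c1', 'c2'):
#     ...     _ = demo.add_argument(name)
#     >>> vars(demo.parse_args(['TMAX', 'annual', '3', '1']))
#     {'v': 'TMAX', 's': 'annual', 'c1': '3', 'c2': '1'}
# ---------------------------------------------------------- #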
import sys
import argparse
import numpy as np

sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/DL_downscaling/utils/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/DL_downscaling/')
from namelist import *
import model_utils as mu
import train_utils as tu

# parse user inputs
parser = argparse.ArgumentParser()
# positionals
parser.add_argument('v', help='Downscaling variable name')
parser.add_argument('s', help='Training seasons summer/winter')
parser.add_argument('c1', help='Number of input channels (1<c<5)')
parser.add_argument('c2', help='Number of output channels (1<c<3)')
args = vars(parser.parse_args())

# parser handling
VAR, seasons, input_flag, output_flag = tu.parser_handler(args)  # prefer seasons = 'annual'
N_input = int(np.sum(input_flag))    # number of input channels
N_output = int(np.sum(output_flag))  # number of (unsupervised) output channels
if N_output > 1:
    raise ValueError('UNet accepts only one target')

# hidden layer numbers (symmetrical)
if VAR == 'PCT':
    print('PCT hidden layer setup')
    N = [64, 96, 128, 160]
else:
    print('T2 hidden layer setup')
    N = [48, 96, 192, 384]

activation = 'relu'  # leakyReLU instead of ReLU
pool = False         # stride convolution instead of maxpooling
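# ---------------------------------------------------------- #
# Note (assumption, not part of the original script): tu.parser_handler is a
# repo utility not shown in this section; input_flag / output_flag are assumed
# to be boolean channel masks, so summing them counts the selected channels:
#
#     >>> int(np.sum([True, True, True, False]))
#     3
#
# which is how N_input / N_output above become plain channel counts.
# ---------------------------------------------------------- #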