Example #1
    def __init__(self, config_fn, data_fn, nlat=96, nlon=144, nlev=30, ntime=48):

        self.nlat, self.nlon = nlat, nlon
        self.ngeo = nlat * nlon

        repo_dir = os.getcwd().split('CBRAIN-CAM')[0] + 'CBRAIN-CAM/'
        with open(config_fn, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)

        self.model = tf.keras.models.load_model(
            repo_dir + 'saved_models/' + config['exp_name'] + '/model.h5',
            custom_objects={**layer_dict, **loss_dict})

        out_scale_dict = load_pickle(config['output_dict'])

        self.valid_gen = DataGenerator(
            data_fn=config['data_dir'] + config['valid_fn'],
            input_vars=config['inputs'],
            output_vars=config['outputs'],
            norm_fn=config['data_dir'] + config['norm_fn'],
            input_transform=(config['input_sub'], config['input_div']),
            output_transform=out_scale_dict,
            batch_size=self.ngeo,
            shuffle=False,
            xarray=True,
            var_cut_off=config.get('var_cut_off')
        )
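
A minimal usage sketch for this constructor, assuming it belongs to the ModelDiagnostics class named in the comments of the later variants, that DataGenerator behaves like an indexable Keras Sequence, and that both file paths (hypothetical) exist:

diag = ModelDiagnostics(config_fn='config/myexp.yml',  # hypothetical config
                        data_fn='data/valid.nc')       # hypothetical data file
X, Y = diag.valid_gen[0]                 # one batch = one global time step (ngeo columns)
Y_pred = diag.model.predict_on_batch(X)

Note that in this version data_fn is accepted but unused; the generator reads config['valid_fn'] instead, which the later variants change.
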
Example #2
    def __init__(self, model, config_fn, data_fn, nlat=64, nlon=128, nlev=30, ntime=48):

        self.nlat, self.nlon = nlat, nlon
        self.ngeo = nlat * nlon

        repo_dir = os.getcwd().split('CBRAIN-CAM')[0] + 'CBRAIN-CAM/'
        with open(config_fn, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)

#         self.model = tf.keras.models.load_model(
#             repo_dir + 'saved_models/' + config['exp_name'] + '/model.h5',
#             custom_objects={**layer_dict, **loss_dict})
#         self.model = tf.keras.models.load_model(model_path,
#             custom_objects={**layer_dict, **loss_dict})
        self.model = model

        out_scale_dict = load_pickle(config['output_dict'])

        self.valid_gen = DataGenerator(
            # tgb - 4/16/2019 - Changed data_fn to the argument data_fn of ModelDiagnostics
            #data_fn=config['data_dir'] + config['valid_fn'],
            data_fn=data_fn,
            input_vars=config['inputs'],
            output_vars=config['outputs'],
            norm_fn=config['data_dir'] + config['norm_fn'],
            input_transform=(config['input_sub'], config['input_div']),
            output_transform=out_scale_dict,
            batch_size=self.ngeo,
            shuffle=False,
            xarray=True,
            var_cut_off=config.get('var_cut_off')
        )
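
This variant accepts a pre-built model instead of loading one from disk, and it actually reads the data_fn argument. The commented-out lines above show the loading pattern it replaces; a sketch of supplying such a model, assuming layer_dict and loss_dict hold the repo's custom layers and losses as in those lines (the paths are hypothetical):

model = tf.keras.models.load_model(
    'saved_models/myexp/model.h5',                 # hypothetical path
    custom_objects={**layer_dict, **loss_dict})
diag = ModelDiagnostics(model,
                        config_fn='config/myexp.yml',  # hypothetical config
                        data_fn='data/valid.nc')       # hypothetical data file
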
Example #3
    def __init__(self,
                 model,
                 config_fn,
                 data_fn,
                 normalize_flag=True,
                 nlat=64,
                 nlon=128,
                 nlev=30,
                 ntime=48):

        self.nlat, self.nlon = nlat, nlon
        self.ngeo = nlat * nlon

        repo_dir = os.getcwd().split('CBRAIN-CAM')[0] + 'CBRAIN-CAM/'
        with open(config_fn, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)

        self.model = model

        out_scale_dict = load_pickle(config['output_dict'])

        self.valid_gen = DataGenerator(
            # tgb - 4/16/2019 - Changed data_fn to the argument data_fn of ModelDiagnostics
            #data_fn=config['data_dir'] + config['valid_fn'],
            data_fn=data_fn,
            input_vars=config['inputs'],
            output_vars=config['outputs'],
            norm_fn=config['data_dir'] + config['norm_fn'],
            input_transform=(config['input_sub'], config['input_div']),
            output_transform=out_scale_dict,
            batch_size=self.ngeo,
            shuffle=False,
            xarray=True,
            normalize_flag=normalize_flag,
            var_cut_off=config.get('var_cut_off'))
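
The only change from the previous variant is normalize_flag, which is forwarded to the DataGenerator; judging by the name, it presumably toggles normalization for files that are already normalized on disk (an assumption). A sketch:

diag = ModelDiagnostics(model, 'config/myexp.yml',
                        'data/valid_prenorm.nc',   # hypothetical pre-normalized file
                        normalize_flag=False)
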
Example #4
def main(args):
    """Main training script."""

    if args.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    limit_mem()

    # Load output scaling dictionary
    out_scale_dict = load_pickle(args.output_dict)

    logging.info('Create training and validation data generators')
    train_gen = DataGenerator(data_fn=args.data_dir + args.train_fn,
                              input_vars=args.inputs,
                              output_vars=args.outputs,
                              norm_fn=args.data_dir + args.norm_fn,
                              input_transform=(args.input_sub, args.input_div),
                              output_transform=out_scale_dict,
                              batch_size=args.batch_size,
                              shuffle=True,
                              var_cut_off=args.var_cut_off)

    if args.valid_fn is not None:
        valid_gen = DataGenerator(data_fn=args.data_dir + args.valid_fn,
                                  input_vars=args.inputs,
                                  output_vars=args.outputs,
                                  norm_fn=args.data_dir + args.norm_fn,
                                  input_transform=(args.input_sub,
                                                   args.input_div),
                                  output_transform=out_scale_dict,
                                  batch_size=args.batch_size * 10,
                                  shuffle=False,
                                  var_cut_off=args.var_cut_off)
    else:
        valid_gen = None

    logging.info('Build model')
    model = fc_model(input_shape=train_gen.n_inputs,
                     output_shape=train_gen.n_outputs,
                     hidden_layers=args.hidden_layers,
                     activation=args.activation,
                     conservation_layer=args.conservation_layer,
                     inp_sub=train_gen.input_transform.sub,
                     inp_div=train_gen.input_transform.div,
                     norm_q=out_scale_dict['PHQ'])
    model.summary()

    logging.info('Compile model')
    if args.loss == 'weak_loss':
        loss = WeakLoss(model.input,
                        inp_div=train_gen.input_transform.div,
                        inp_sub=train_gen.input_transform.sub,
                        norm_q=out_scale_dict['PHQ'],
                        alpha_mass=args.alpha_mass,
                        alpha_ent=args.alpha_ent,
                        noadiab=args.noadiab)
    else:
        loss = args.loss

    metrics = [mse]
    if args.conservation_metrics:
        mass_loss = WeakLoss(model.input,
                             inp_div=train_gen.input_transform.div,
                             inp_sub=train_gen.input_transform.sub,
                             norm_q=out_scale_dict['PHQ'],
                             alpha_mass=1,
                             alpha_ent=0,
                             name='mass_loss',
                             noadiab=args.noadiab)
        ent_loss = WeakLoss(model.input,
                            inp_div=train_gen.input_transform.div,
                            inp_sub=train_gen.input_transform.sub,
                            norm_q=out_scale_dict['PHQ'],
                            alpha_mass=0,
                            alpha_ent=1,
                            name='ent_loss',
                            noadiab=args.noadiab)
        metrics += [mass_loss, ent_loss]

    model.compile(args.optimizer, loss=loss, metrics=metrics)
    lrs = LearningRateScheduler(LRUpdate(args.lr, args.lr_step,
                                         args.lr_divide))

    logging.info('Train model')
    model.fit_generator(train_gen,
                        epochs=args.epochs,
                        validation_data=valid_gen,
                        callbacks=[lrs])

    if args.exp_name is not None:
        exp_dir = args.model_dir + args.exp_name + '/'
        os.makedirs(exp_dir, exist_ok=True)
        model_fn = exp_dir + 'model.h5'
        logging.info(f'Saving model as {model_fn}')
        model.save(model_fn)

        if args.save_txt:
            weights_fn = exp_dir + 'weights.h5'
            logging.info(f'Saving weights as {weights_fn}')
            model.save_weights(weights_fn)
            save2txt(weights_fn, exp_dir)
            save_norm(train_gen.input_transform, train_gen.output_transform,
                      exp_dir)

    logging.info('Done!')
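
The script reads everything from args, so it is presumably driven by an argument parser (or a config-file wrapper around one). A minimal sketch covering only a subset of the attributes used above; the flag names mirror the attributes, but every default here is an assumption:

import argparse

if __name__ == '__main__':
    p = argparse.ArgumentParser(description='Train a CBRAIN-CAM network.')
    p.add_argument('--data_dir', required=True)
    p.add_argument('--train_fn', required=True)
    p.add_argument('--valid_fn', default=None)
    p.add_argument('--norm_fn', required=True)
    p.add_argument('--inputs', nargs='+', required=True)
    p.add_argument('--outputs', nargs='+', required=True)
    p.add_argument('--output_dict', required=True)
    p.add_argument('--batch_size', type=int, default=1024)
    p.add_argument('--epochs', type=int, default=30)
    p.add_argument('--lr', type=float, default=1e-3)
    p.add_argument('--lr_step', type=int, default=5)
    p.add_argument('--lr_divide', type=float, default=5.)
    p.add_argument('--loss', default='mse')
    p.add_argument('--gpu', default=None)
    p.add_argument('--exp_name', default=None)
    p.add_argument('--model_dir', default='./saved_models/')
    main(p.parse_args())
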
Example #5
in_vars_RH = ['RH', 'TBP', 'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars_RH = ['PHQ', 'TPHYSTND', 'FSNT', 'FSNS', 'FLNT', 'FLNS']

TRAINFILE_RH = 'CI_RH_M4K_NORM_train_shuffle.nc'
NORMFILE_RH = 'CI_RH_M4K_NORM_norm.nc'
VALIDFILE_RH = 'CI_RH_M4K_NORM_valid.nc'  # Experiment 1/2
#VALIDFILE_RH = 'CI_RH_P4K_NORM_valid.nc' # Experiment 3/4

train_gen_RH = DataGenerator(
    data_fn=path + TRAINFILE_RH,
    input_vars=in_vars_RH,
    output_vars=out_vars_RH,
    norm_fn=path + NORMFILE_RH,
    input_transform=('mean', 'maxrs'),
    output_transform=scale_dict_RH,
    batch_size=BATCH_SIZE,
    shuffle=True,
)

# ### Data Generator using TNS

in_vars = ['QBP', 'TfromNS', 'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'TPHYSTND', 'FSNT', 'FSNS', 'FLNT', 'FLNS']

TRAINFILE_TNS = 'CI_TNS_M4K_NORM_train_shuffle.nc'
NORMFILE_TNS = 'CI_TNS_M4K_NORM_norm.nc'
VALIDFILE_TNS = 'CI_TNS_M4K_NORM_valid.nc'  # Experiment 1/2
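
The notebook cell that builds the TNS generator is not included in this excerpt; by analogy with train_gen_RH above it would presumably look as follows (a sketch; scale_dict_TNS is an assumed name for the matching output-scaling dictionary):

train_gen_TNS = DataGenerator(
    data_fn=path + TRAINFILE_TNS,
    input_vars=in_vars,
    output_vars=out_vars,
    norm_fn=path + NORMFILE_TNS,
    input_transform=('mean', 'maxrs'),
    output_transform=scale_dict_TNS,
    batch_size=BATCH_SIZE,
    shuffle=True,
)
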
Example #6
# Arbitrary 0.01 factor as specific humidity is generally below 2%
scale_dict['RH'] = 0.01 * L_S / G

in_vars_RH = ['RH', 'TBP', 'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars_RH = ['PHQ', 'TPHYSTND', 'FSNT', 'FSNS', 'FLNT', 'FLNS']

TRAINFILE_RH = 'CI_RH_M4K_NORM_train_shuffle.nc'
NORMFILE_RH = 'CI_RH_M4K_NORM_norm.nc'
VALIDFILE_RH = 'CI_RH_M4K_NORM_valid.nc'
BASE_DIR = '/DFS-L/DATA/pritchard/ankitesg/'

train_gen_RH = DataGenerator(data_fn=f"{BASE_DIR}data/{TRAINFILE_RH}",
                             input_vars=in_vars_RH,
                             output_vars=out_vars_RH,
                             norm_fn=f"{BASE_DIR}data/{NORMFILE_RH}",
                             input_transform=('mean', 'maxrs'),
                             output_transform=scale_dict,
                             batch_size=1024,
                             shuffle=True,
                             normalize_flag=True)

in_vars = ['QBP', 'TfromNSV2', 'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'TPHYSTND', 'FSNT', 'FSNS', 'FLNT', 'FLNS']

TRAINFILE_TNS = 'CI_TNSV2_M4K_NORM_train_shuffle.nc'
NORMFILE_TNS = 'CI_TNSV2_M4K_NORM_norm.nc'
VALIDFILE_TNS = 'CI_TNSV2_M4K_NORM_valid.nc'

train_gen_TNS = DataGenerator(data_fn=f"{BASE_DIR}data/{TRAINFILE_TNS}",
                              input_vars=in_vars,
                              output_vars=out_vars,
                              norm_fn=f"{BASE_DIR}data/{NORMFILE_TNS}",
                              input_transform=('mean', 'maxrs'),
                              output_transform=scale_dict,
                              batch_size=1024,
                              shuffle=True,
                              normalize_flag=True)
Example #7
def main(inargs):
    """Main function.
    """
    # set GPU usage
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    keras.backend.tensorflow_backend.set_session(tf.Session(config=config))

    if inargs.norm_fn is None:
        norm_fn = inargs.train_fn.split('_shuffle')[0] + '_norm.nc'
    else:
        norm_fn = inargs.norm_fn

    # Load train and valid set
    train_gen = DataGenerator(
        inargs.data_dir,
        inargs.train_fn + '_features.nc',
        inargs.train_fn + '_targets.nc',
        inargs.batch_size,
        norm_fn,
        inargs.fsub, inargs.fdiv, inargs.tsub, inargs.tmult,
        shuffle=True, noise=inargs.noise,
    )
    valid_gen = DataGenerator(
        inargs.data_dir,
        inargs.valid_fn + '_features.nc',
        inargs.valid_fn + '_targets.nc',
        16384,  # Large batch size for speed!
        norm_fn,
        inargs.fsub, inargs.fdiv, inargs.tsub, inargs.tmult,
        shuffle=False,
    )
    feature_shape = train_gen.feature_shape
    target_shape = train_gen.target_shape

    # Build and compile model
    if inargs.convolution:
        model = conv_model(
            (30, 7) if inargs.tile else (30, 3),
            4,
            target_shape,
            inargs.conv_layers,
            inargs.hidden_layers,
            inargs.lr,
            loss_dict[inargs.loss],
            kernel_size=inargs.kernel_size, stride=inargs.stride,
            batch_norm=inargs.batch_norm,
            activation=inargs.activation,
            tile=inargs.tile,
            locally_connected=inargs.locally_connected,
            padding=inargs.padding,
            dr=inargs.dr,
        )
    else:   # Fully connected model
        model = fc_model(
            feature_shape,
            target_shape,
            inargs.hidden_layers,
            inargs.lr,
            loss_dict[inargs.loss],
            batch_norm=inargs.batch_norm,
            activation=inargs.activation,
            dr=inargs.dr,
            l2=inargs.l2,
            partial_relu=inargs.partial_relu,
            eq=inargs.eq,
            fsub=train_gen.feature_norms[0],
            fdiv=train_gen.feature_norms[1],
        )
    if inargs.verbose:
        model.summary()  # summary() prints itself; its return value is None

    callbacks_list = []
    if inargs.log_dir is not None:
        callbacks_list.append(TensorBoard(log_dir=inargs.log_dir +
                                                  inargs.exp_name))
    if inargs.lr_step != 0:
        def lr_update(epoch):
            # From goo.gl/GXQaK6
            init_lr = inargs.lr
            drop = 1./inargs.lr_divide
            epochs_drop = inargs.lr_step
            lr = init_lr * np.power(drop, np.floor((1+epoch)/epochs_drop))
            print('lr:', lr)
            return lr
        callbacks_list.append(LearningRateScheduler(lr_update))

    # Fit model
    model.fit_generator(
        train_gen.return_generator(inargs.convolution, inargs.tile),
        train_gen.n_batches,
        epochs=inargs.epochs - int(inargs.valid_after),
        validation_data=None if inargs.valid_after
            else valid_gen.return_generator(inargs.convolution, inargs.tile),
        validation_steps=valid_gen.n_batches,
        workers=inargs.n_workers,
        max_queue_size=50,
        callbacks=callbacks_list,
    )
    if inargs.valid_after:
        # Run last epoch with validation
        # Set the final-epoch learning rate on the existing optimizer variable;
        # reassigning the attribute would not affect the already-compiled
        # training function (assumes lr_step != 0, so lr_update is defined)
        keras.backend.set_value(model.optimizer.lr, lr_update(inargs.epochs))
        # Drop the LR scheduler for the final epoch, keep any other callbacks
        callbacks_list = [c for c in callbacks_list
                          if not isinstance(c, LearningRateScheduler)]
        model.fit_generator(
            train_gen.return_generator(inargs.convolution, inargs.tile),
            train_gen.n_batches,
            epochs=1,
            validation_data=valid_gen.return_generator(inargs.convolution, inargs.tile),
            validation_steps=valid_gen.n_batches,
            workers=inargs.n_workers,
            max_queue_size=inargs.max_queue_size,
            callbacks=callbacks_list,
        )
    if inargs.exp_name is not None:
        os.makedirs(inargs.model_dir, exist_ok=True)
        model.save(inargs.model_dir + '/' + inargs.exp_name + '.h5')
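
This script resolves its loss by name through loss_dict (see the conv_model and fc_model calls above). The real mapping lives elsewhere in the repo; a minimal sketch of its shape, assuming standard Keras string losses plus one custom entry (the rmse helper here is illustrative, not the repo's definition):

import keras.backend as K

def rmse(y_true, y_pred):
    # Root-mean-square error in normalized output space
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

# Keys are the strings accepted on the command line; values are anything
# keras.Model.compile accepts as a loss
loss_dict = {'mse': 'mse', 'mae': 'mae', 'rmse': rmse}
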