Example #1
activation='relu' # alternative: LeakyReLU instead of ReLU
pool=False # stride convolution instead of maxpooling

key = 'PPT'

for sea in seasons:
    # ---------- Generator ---------- #
    # load weights
    model_name = 'UNET-G{}_{}_{}_tune'.format(N_input, VAR, sea) # UNET{}_{}_{}_tune
    model_path = temp_dir+model_name+'.hdf'
    print('Import model: {}'.format(model_name))
    backbone = keras.models.load_model(model_path)
    W = backbone.get_weights()

    # generator
    G = mu.UNET(N, (None, None, N_input), pool=pool, activation=activation)
    # optimizer
    opt_G = keras.optimizers.Adam(lr=l[0])

    print('Compiling G')
    G.compile(loss=keras.losses.mean_squared_error, optimizer=opt_G)
    G.set_weights(W)
    # ---------- Discriminator ---------- #
    # load weights
    model_name = 'NEO_D_{}_{}_pretrain'.format(VAR, sea) # GAN_D_{}_{}
    model_path = temp_dir+model_name+'.hdf'

    print('Import model: {}'.format(model_name))
    backbone = keras.models.load_model(model_path)
    W = backbone.get_weights()
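The snippet cuts off right after the discriminator weights are loaded. A minimal sketch of the likely continuation, mirroring the discriminator setup shown in Example #5 (the `mu.vgg_descriminator` constructor and the learning rate `l[1]` are taken from there; `D.set_weights(W)` is an assumption by analogy with the generator):

    # Sketch: rebuild and compile the discriminator, then restore its
    # pre-trained weights (mirrors Example #5; set_weights is assumed).
    input_size = (None, None, N_input + 1)
    D = mu.vgg_descriminator(N, input_size)

    opt_D = keras.optimizers.Adam(lr=l[1])
    print('Compiling D')
    D.compile(loss=keras.losses.mean_squared_error, optimizer=opt_D)
    D.set_weights(W)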
Example #2
#     return 0.25*K.mean(K.abs(loss_models[0](y_true) - loss_models[0](y_pred)))+\
#            0.25*K.mean(K.abs(loss_models[1](y_true) - loss_models[1](y_pred)))+\
#            0.25*K.mean(K.abs(loss_models[2](y_true) - loss_models[2](y_pred)))+\
#            0.25*K.mean(K.abs(y_true - y_pred))
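The commented-out return statement above is the body of the content loss that this example compiles as `CLOSS`. A minimal reconstruction under that assumption (`loss_models` must already be bound, as it is inside the loop below, before the loss is evaluated):

# Hypothetical reconstruction of CLOSS from the commented-out body above;
# loss_models holds three frozen feature-extractor models (see the loop below).
from keras import backend as K

def CLOSS(y_true, y_pred):
    # three equally weighted feature-space L1 terms plus a pixel-space L1 term
    return 0.25*K.mean(K.abs(loss_models[0](y_true) - loss_models[0](y_pred))) + \
           0.25*K.mean(K.abs(loss_models[1](y_true) - loss_models[1](y_pred))) + \
           0.25*K.mean(K.abs(loss_models[2](y_true) - loss_models[2](y_pred))) + \
           0.25*K.mean(K.abs(y_true - y_pred))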

for sea in seasons:
    print('===== {} training ====='.format(sea))
    # Content loss definition
    loss_models = dummy_loss_model(VAR, sea)
    W = dummy_model_loader(N_input, VAR, sea)

    # tuned model
    model_name_tune = 'UNET-C{}_{}_{}_tune'.format(N_input, VAR, sea)
    model_path_tune = temp_dir+model_name_tune+'.hdf' # checkpoint
    train_path_tune = temp_dir+model_name_tune+'.npy' # train history
    model = mu.UNET(N, (None, None, N_input))
    # optimizer
    opt_sgd = keras.optimizers.SGD(lr=l, decay=0.025)
    # callback
    callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.000001, patience=2, verbose=True),
                 keras.callbacks.ModelCheckpoint(filepath=model_path_tune, verbose=True,
                                                 monitor='val_loss', save_best_only=True)]
    # compile
    model.compile(loss=CLOSS, optimizer=opt_sgd, metrics=[keras.losses.mean_absolute_error])
    model.set_weights(W)

    # full list of training files
    trainfiles = glob(file_path+'{}_BATCH_*_TORI_*{}*.npy'.format(VAR, sea)) # e.g., TMAX_BATCH_128_TORI_mam30.npy
    validfiles = glob(file_path+'{}_BATCH_*_VORI_*{}*.npy'.format(VAR, sea)) # excluding the "ORIAUG" pattern
    # shuffle filenames
    shuffle(trainfiles)
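The snippet ends after shuffling the file list. One plausible way the fit could proceed with the callbacks defined above; `batch_gen` and the .npy layout (a pickled dict whose 'batch' array stacks inputs and targets along the last axis) are assumptions, not part of the original:

    # Hypothetical fitting step; batch_gen and the file layout are assumed.
    import numpy as np

    def batch_gen(files):
        while True:
            for f in files:
                data = np.load(f, allow_pickle=True)[()]['batch']
                yield data[..., :N_input], data[..., N_input:]

    model.fit_generator(batch_gen(trainfiles), steps_per_epoch=len(trainfiles),
                        validation_data=batch_gen(validfiles),
                        validation_steps=len(validfiles),
                        epochs=150,  # illustrative
                        callbacks=callbacks)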
Example #3
lr = 5e-5
epochs = 150
activation='relu'
pool=False # stride convolution instead of maxpooling

# early stopping settings
min_del = 0
max_tol = 3 # early stopping with patience

# ---------------------------------------------------------- #
# training by seasons
for sea in seasons:
    
    # UNET configuration
    dscale_unet = mu.UNET(N, (None, None, N_input), pool=pool, activation=activation)
    opt_ = keras.optimizers.Adam(lr=lr)
    dscale_unet.compile(loss=keras.losses.mean_absolute_error, optimizer=opt_)
    
    # check point settings
    # "temp_dir" is where models are saved, defined in the namelist.py
    save_name = 'UNET_raw_{}_{}'.format(VAR, sea)
    save_path = temp_dir+save_name+'/'
    hist_path = temp_dir+'{}_loss.npy'.format(save_name)

    # allocate arrays for training/validation loss
    T_LOSS = np.empty((int(epochs*L_train),)); T_LOSS[...] = np.nan
    V_LOSS = np.empty((epochs,)); V_LOSS[...] = np.nan
    
    # ---------------------- data section ---------------------- #
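The data section is truncated. A sketch of the manual loop these buffers imply, assuming hypothetical `train_batches()` (yielding `L_train` batches per epoch) and `X_valid`/`y_valid` arrays; none of these names appear in the original:

    # Hypothetical manual training loop filling T_LOSS/V_LOSS, with the
    # early-stopping settings (min_del, max_tol) defined above.
    tol = 0
    best = np.inf
    for i in range(epochs):
        for j, (X, y) in enumerate(train_batches()):  # L_train batches per epoch
            T_LOSS[i*L_train + j] = dscale_unet.train_on_batch(X, y)
        V_LOSS[i] = dscale_unet.evaluate(X_valid, y_valid, verbose=0)
        if V_LOSS[i] < best - min_del:
            best = V_LOSS[i]; tol = 0
            dscale_unet.save(save_path + save_name + '.hdf')  # checkpoint best model
        else:
            tol += 1
            if tol >= max_tol:  # early stopping with patience
                break
    np.save(hist_path, {'T_LOSS': T_LOSS, 'V_LOSS': V_LOSS})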
Example #4
    print('Importing pre-trained weights')
    # import the pre-trained model (e.g., 'UAE3_TMAX_djf_tune.hdf' when N_input=3)
    model_name = 'UAE{}_{}_{}_tune'.format(N_input, VAR, sea)
    model_path = temp_dir + model_name + '.hdf'
    print('\tmodel: {}'.format(model_name))
    backbone = keras.models.load_model(model_path)
    W = backbone.get_weights()
    # tuned model
    model_name_tune = 'UAE{}_{}_{}_trans'.format(N_input, VAR,
                                                 sea)  # saved separately
    model_path_tune = temp_dir + model_name_tune + '.hdf'  # checkpoint

    print('Tuning model configurations')
    # elev output branch
    elev_tuner = mu.UNET(N, (None, None, N_input))
    elev_tuner = freeze_unet(elev_tuner, l=19, lr=1e-6)  # 19
    # two output branches
    target_tuner = mu.UNET_AE(N, (None, None, N_input),
                              output_channel_num=N_output,
                              drop_rate=0)
    target_tuner = freeze_uae(target_tuner, l=0, lr=1e-6)

    target_tuner.set_weights(W)
    for n in range(max_iter):
        print('Tuning epoch = {}'.format(n))
        if n == 0:
            target_tuner.set_weights(W)  # set backbone weights
            print('\t Performance before tuning')
            baseline_hist = target_tuner.evaluate_generator(gen_valid,
                                                            verbose=1)
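The loop body is cut off after the baseline evaluation. A sketch of how each tuning epoch might continue; `gen_train` and `L_train` are assumptions, and the total-loss comparison is only one reasonable checkpointing rule:

        # Hypothetical continuation: one epoch over a training generator,
        # then checkpoint when the total validation loss improves.
        import numpy as np  # if not already imported
        target_tuner.fit_generator(gen_train, steps_per_epoch=L_train,
                                   epochs=1, verbose=1)
        tune_hist = target_tuner.evaluate_generator(gen_valid, verbose=1)
        if np.sum(tune_hist) < np.sum(baseline_hist):  # works for scalar or list
            baseline_hist = tune_hist
            target_tuner.save(model_path_tune)  # save the tuned model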
Example #5
inout_flag = [True, True, False, False, True, True]
labels = ['batch', 'batch']  # input and output labels

file_path = BATCH_dir

for sea in seasons:
    print('========== {} =========='.format(sea))
    # train with tuned UNET
    model_name = 'UNET-G{}_{}_{}_tune'.format(N_input, VAR, sea)
    model_path = temp_dir + model_name + '.hdf'
    print('Import model: {}'.format(model_name))
    backbone = keras.models.load_model(model_path)
    W = backbone.get_weights()

    # generator
    G = mu.UNET(N, (None, None, N_input), pool=pool)
    # optimizer
    opt_G = keras.optimizers.Adam(lr=l[0])

    print('Compiling G')
    G.compile(loss=keras.losses.mean_absolute_error, optimizer=opt_G)
    G.set_weights(W)

    input_size = (None, None, N_input + 1)
    D = mu.vgg_descriminator(N, input_size)

    opt_D = keras.optimizers.Adam(lr=l[1])
    print('Compiling D')
    D.compile(loss=keras.losses.mean_squared_error, optimizer=opt_D)

    trainfiles = glob(file_path + '{}_BATCH_*_TORI_*{}*.npy'.format(VAR, sea))
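The snippet ends before the adversarial step. A minimal sketch of the standard Keras GAN wiring on top of the G and D defined above: D is frozen inside a combined model so the adversarial loss only updates G. The concatenation layout (G's single-channel output stacked with the inputs, giving the N_input + 1 channels D expects) is an assumption inferred from D's input size:

    # Hypothetical adversarial wiring: freeze D inside the combined model
    # so GAN.train_on_batch only updates G; D itself stays trainable via
    # its own earlier compile.
    D.trainable = False
    x_in = keras.layers.Input((None, None, N_input))
    d_in = keras.layers.Concatenate()([G(x_in), x_in])  # N_input + 1 channels (assumed layout)
    GAN = keras.models.Model(x_in, D(d_in))
    GAN.compile(loss=keras.losses.mean_squared_error,
                optimizer=keras.optimizers.Adam(lr=l[0]))

    # Per-batch alternation (X, y and the real/fake labels come from the
    # .npy batches, whose layout is not shown here):
    #   D.train_on_batch(np.concatenate([y, X], axis=-1), real_labels)
    #   D.train_on_batch(np.concatenate([G.predict(X), X], axis=-1), fake_labels)
    #   GAN.train_on_batch(X, real_labels)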