def compile_models(lstm_model, hyperparams):
    """
    Compile full training model with choosen hyperparameters.
    
    Parameters
    ----------
    lstm_model -> Keras model : Full training model to be compiled
    hyperparams -> dict : Dictionary containing all hyperparamers for model compilation
        
    Returns
    -------
    lstm_model (compiled) -> keras compiled model : A fully-compiled keras LSTM model using the specified hyperparameters.
    """
    clip_select_flag = hyperparams["clip_select_flag"]
    learning_rate = hyperparams["learning_rate"]
    BETA_1 = hyperparams["BETA_1"]
    BETA_2 = hyperparams["BETA_2"]
    lr_decay = hyperparams["lr_decay"]
    slow_weights = hyperparams["slow_weights_lookahead"]
    sync_lookahead = hyperparams["sync_lookahead"]
    warmup_RAdam = hyperparams["warmup_RAdam"]
    min_lr_RAdam = hyperparams["min_lr_RAdam"]
    weight_decay_RAdam = hyperparams["weight_decay_RAdam"]
    total_steps_RAdam = hyperparams["total_steps_RAdam"]
    clip_norm_thresh = hyperparams["clip_norm_thresh"]
    clip_val_thresh = hyperparams["clip_val_thresh"]
    ams_grad_flag = hyperparams["ams_grad_flag"]
    loss_function = hyperparams["loss_fcn"]
    optimizer = hyperparams["optimizer"]
    epsilon = hyperparams["epsilon"]
    
    if(clip_select_flag == "norm" and optimizer == "adam"):
        opt_norm_clip = Lookahead(keras.optimizers.adam(lr = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, decay = lr_decay, amsgrad = ams_grad_flag), sync_period = sync_lookahead, slow_step = slow_weights)
        lstm_model.compile(optimizer = opt_norm_clip, loss = loss_function, metrics = [rmse])
        return lstm_model
    
    if(clip_select_flag == "norm" and optimizer == "nadam"):
        opt_norm_clip = keras.optimizers.nadam(lr = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, epsilon = epsilon, schedule_decay = lr_decay)
        lstm_model.compile(optimizer = opt_norm_clip, loss = loss_function, metrics = [rmse])
        return lstm_model

    elif(clip_select_flag == "value" and optimizer == "adam"):
        opt_val_clip = Lookahead(keras.optimizers.adam(lr = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, decay = lr_decay, clipvalue = clip_val_thresh, amsgrad = ams_grad_flag), sync_period = sync_lookahead, slow_step = slow_weights)
        lstm_model.compile(optimizer = opt_val_clip, loss = loss_function, metrics = [rmse])
        return lstm_model

    elif(clip_select_flag == "value" and optimizer == "nadam"):
        opt_val_clip = keras.optimizers.nadam(lr = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, epsilon = epsilon, schedule_decay = lr_decay)
        lstm_model.compile(optimizer = opt_val_clip, loss = loss_function, metrics = [rmse])
        return lstm_model

    elif(optimizer == "RAdam"):
        opt = RAdam(learning_rate = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, epsilon = epsilon, weight_decay = weight_decay_RAdam, amsgrad = ams_grad_flag, total_steps = total_steps_RAdam, warmup_proportion = warmup_RAdam, min_lr = min_lr_RAdam)
        lstm_model.compile(optimizer = opt, loss = loss_function, metrics = [rmse]) 
        return lstm_model

    elif(optimizer == "Ranger"):
        opt = Lookahead(RAdam(learning_rate = learning_rate, beta_1 = BETA_1, beta_2 = BETA_2, epsilon = epsilon, weight_decay = weight_decay_RAdam, amsgrad = ams_grad_flag, total_steps = total_steps_RAdam, warmup_proportion = warmup_RAdam, min_lr = min_lr_RAdam), sync_period = sync_lookahead, slow_step = slow_weights)
        lstm_model.compile(optimizer = opt, loss = loss_function, metrics = [rmse]) 
        return lstm_model

    else:
        print(" Clipping Method OR Optimizer Selected is not avalaible! Please enter a valid string for these parameter: \n Valid Clipping:['norm', 'value'] \n Valid Optimizers: ['adam', 'NAdam', 'RAdam', 'Ranger']")
        return lstm_model
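
# Hedged usage sketch (not part of the original listing). It assumes that the
# Lookahead, RAdam and rmse objects used by compile_models are importable in
# this module (e.g. from the keras-lookahead and keras-rectified-adam
# packages); every hyperparameter value below is a placeholder.
from keras.models import Sequential
from keras.layers import LSTM, Dense

example_hyperparams = {
    "clip_select_flag": "norm", "learning_rate": 1e-3, "BETA_1": 0.9,
    "BETA_2": 0.999, "lr_decay": 0.0, "slow_weights_lookahead": 0.5,
    "sync_lookahead": 5, "warmup_RAdam": 0.1, "min_lr_RAdam": 1e-5,
    "weight_decay_RAdam": 0.0, "total_steps_RAdam": 10000,
    "clip_norm_thresh": 1.0, "clip_val_thresh": 0.5, "ams_grad_flag": False,
    "loss_fcn": "mean_squared_error", "optimizer": "Ranger", "epsilon": 1e-8,
}

# A toy LSTM model stands in for the real training model.
toy_model = Sequential([LSTM(32, input_shape=(10, 4)), Dense(1)])
toy_model = compile_models(toy_model, example_hyperparams)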
Example No. 2
def train_model(input_data: str,
                test_file=None,
                batch_size=64,
                nb_epoch=100,
                early_stop_patience=None,
                mod=None,
                max_x_length=50,
                scale_para=None,
                unit="s",
                out_dir="./",
                prefix="test",
                p_model=None,
                model=None,
                optimizer_name=None,
                use_radam=False,
                add_reverse=False,
                add_ReduceLROnPlateau=False,
                use_external_test_data=True):
    """
    Used by AutoRT
    :param input_data:
    :param test_file:
    :param batch_size:
    :param nb_epoch:
    :param early_stop_patience:
    :param mod:
    :param max_x_length:
    :param min_rt:
    :param max_rt:
    :param unit:
    :param out_dir:
    :param prefix:
    :param p_model:
    :param model:
    :param optimizer_name:
    :param use_radam:
    :param use_external_test_data:
    :return:
    """

    res_map = dict()

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    print("Build deep learning model ...")

    X_train, Y_train, X_test, Y_test, scale_para = data_processing(
        input_data=input_data,
        test_file=test_file,
        mod=mod,
        max_x_length=max_x_length,
        scale_para=scale_para,
        unit=unit,
        out_dir=out_dir,
        add_reverse=add_reverse)

    if model is None:
        print("Use default model ...")
        model = build_default_model(X_train.shape[1:])
    else:
        print("Use input model ...")
        model = clone_model(model)

    if p_model is not None:
        transfer_layer = 5
        frozen = True
        # model_copy.set_weights(model.get_weights())
        if use_radam == True:
            base_model = load_model(p_model,
                                    custom_objects={
                                        "Lookahead": Lookahead,
                                        "RAdam": RAdam
                                    })
        else:
            base_model = load_model(p_model)
        print("Perform transfer learning ...")
        n_layers = len(base_model.layers)
        print("The number of layers: %d" % (n_layers))
        for l in range((n_layers - transfer_layer)):
            if l != 0:
                model.layers[l].set_weights(base_model.layers[l].get_weights())
                if frozen is True:
                    model.layers[l].trainable = False
                    print("layer (frozen:True): %d %s" %
                          (l, model.layers[l].name))
                else:
                    print("layer (frozen:False): %d %s" %
                          (l, model.layers[l].name))

    if model.optimizer is None:
        ## use default optimizer: Adam
        if optimizer_name is None:

            if use_radam == True:
                print("Use optimizer: %s" % ("rectified-adam"))
                model.compile(
                    loss='mean_squared_error',
                    # math.ceil(X_train.shape[0]/batch_size*nb_epoch)
                    optimizer=Lookahead(RAdam(), sync_period=5, slow_step=0.5))
                #optimizer=Lookahead(RAdam(total_steps=math.ceil(X_train.shape[0]/batch_size), warmup_proportion=0.1, min_lr=1e-5, lr=0.001),sync_period=5, slow_step=0.5),
                #metrics=['mean_squared_error'])
            else:
                print("Use default optimizer:Adam")
                model.compile(loss='mean_squared_error', optimizer="adam")
                #metrics=['mean_squared_error'])
        else:

            if use_radam == True:
                print("Use optimizer: %s" % ("rectified-adam"))
                model.compile(loss='mean_squared_error',
                              optimizer=Lookahead(RAdam(),
                                                  sync_period=5,
                                                  slow_step=0.5))
                #optimizer=Lookahead(RAdam(total_steps=math.ceil(X_train.shape[0]/batch_size), warmup_proportion=0.1, min_lr=1e-5, lr=0.001),
                #                    sync_period=5, slow_step=0.5),
                #metrics=['mean_squared_error'])
            else:
                print("Use optimizer provided by user: %s" % (optimizer_name))
                model.compile(loss='mean_squared_error',
                              optimizer=optimizer_name)
                # Implementation from https://github.com/GLambard/AdamW_Keras
                #optimizer=Adam(amsgrad=True))
                #optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
                #optimizer=Lookahead(AdamW(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0., weight_decay=1e-4, batch_size=batch_size, samples_per_epoch=X_train.shape[0], epochs=nb_epoch),sync_period=5, slow_step=0.5))
                #metrics=['mean_squared_error'])

    else:
        if optimizer_name is None:

            if use_radam == True:
                print("Use optimizer: %s" % ("rectified-adam"))
                model.compile(loss='mean_squared_error',
                              optimizer=Lookahead(RAdam(),
                                                  sync_period=5,
                                                  slow_step=0.5))
                #optimizer=Lookahead(RAdam(total_steps=math.ceil(X_train.shape[0]/batch_size), warmup_proportion=0.1, min_lr=1e-5, lr=0.001),
                #                    sync_period=5, slow_step=0.5),
                #metrics=['mean_squared_error'])
            else:
                print("Use optimizer from the model.")
                model.compile(
                    loss='mean_squared_error',
                    ## In this case, we cannot change the learning rate.
                    optimizer=model.optimizer)
                #metrics=['mean_squared_error'])

        else:

            if use_radam == True:
                print("Use optimizer: %s" % ("rectified-adam"))
                model.compile(loss='mean_squared_error',
                              optimizer=Lookahead(RAdam(),
                                                  sync_period=5,
                                                  slow_step=0.5))
                #optimizer=Lookahead(RAdam(total_steps=math.ceil(X_train.shape[0]/batch_size), warmup_proportion=0.1, min_lr=1e-5, lr=0.001),
                #                    sync_period=5, slow_step=0.5),
                #metrics=['mean_squared_error'])
            else:
                print("Use optimizer provided by user: %s" % (optimizer_name))
                model.compile(
                    loss='mean_squared_error',
                    ## In this case, we cannot change the learning rate.
                    optimizer=optimizer_name)
                #metrics=['mean_squared_error'])

    print("optimizer: %s" % (type(model.optimizer)))

    model.summary()
    # model = multi_gpu_model(model, gpus=3)

    my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test, scale_para)
    # Save model
    model_chk_path = out_dir + "/best_model.hdf5"
    mcp = ModelCheckpoint(model_chk_path,
                          save_best_only=True,
                          save_weights_only=False,
                          verbose=1,
                          mode='min')

    all_callbacks = list()
    all_callbacks.append(my_callbacks)
    all_callbacks.append(mcp)

    if add_ReduceLROnPlateau is True:
        print("Use ReduceLROnPlateau!")
        all_callbacks.append(
            keras.callbacks.ReduceLROnPlateau(patience=5,
                                              factor=0.5,
                                              verbose=1,
                                              min_lr=0.000001,
                                              min_delta=0))

    if early_stop_patience is not None:
        print("Use EarlyStopping: %d" % (early_stop_patience))
        all_callbacks.append(
            EarlyStopping(patience=early_stop_patience, verbose=1))

    ## monitor training information
    # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
    #model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, validation_data=(X_test, Y_test), callbacks=[my_callbacks, mcp])
    if use_external_test_data is True:
        model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            epochs=nb_epoch,
            validation_data=(X_test, Y_test),
            # callbacks=[my_callbacks, mcp])
            callbacks=all_callbacks)
    else:
        model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            epochs=nb_epoch,
            validation_split=0.1,
            # callbacks=[my_callbacks, mcp])
            callbacks=all_callbacks)

    ## get the best model
    if use_radam == True:
        model_best = load_model(model_chk_path,
                                custom_objects={
                                    "Lookahead": Lookahead,
                                    "RAdam": RAdam
                                })
    else:
        model_best = load_model(model_chk_path)

    y_pred = model_best.predict(X_test)

    y_pred_rev = scaling_y_rev(y_pred, scale_para)
    y_true = scaling_y_rev(Y_test, scale_para)

    #test_data['pred'] = y_pred_rev
    #test_data.to_csv("pred.csv")
    x = pd.DataFrame({
        "y": y_true,
        "y_pred": y_pred_rev.reshape(y_pred_rev.shape[0])
    })
    out_file = out_dir + "/" + prefix + ".csv"
    print("Prediction result: %s" % (out_file))
    x.to_csv(out_file)

    res_map['model'] = model_best
    return res_map
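
# Hedged usage sketch (not from the original source). File names, unit and
# epoch counts are placeholders; input_data is expected to be a tab-separated
# file in the format consumed by data_processing.
res = train_model(input_data="training_data.tsv",
                  test_file="test_data.tsv",
                  batch_size=64,
                  nb_epoch=40,
                  early_stop_patience=10,
                  unit="m",
                  out_dir="./autort_output",
                  prefix="run1",
                  use_radam=True,
                  add_ReduceLROnPlateau=True)
best_model = res["model"]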
Example No. 3
def build_model():
    con_filters = 128
    left_input_go = Input(shape=(bert_len + w2v_len, project_dim))
    right_input_go = Input(shape=(bert_len + w2v_len, project_dim))
    NUM_FILTERS = 32
    FILTER_LENGTH1 = 5
    FILTER_LENGTH2 = 5

    left_x_go = Conv1D(filters=NUM_FILTERS,
                       kernel_size=FILTER_LENGTH1,
                       activation='relu',
                       padding='valid',
                       strides=1)(left_input_go)
    left_x_go = Conv1D(filters=NUM_FILTERS * 2,
                       kernel_size=FILTER_LENGTH1,
                       activation='relu',
                       padding='valid',
                       strides=1)(left_x_go)
    left_x_go = Conv1D(filters=NUM_FILTERS * 3,
                       kernel_size=FILTER_LENGTH1,
                       activation='relu',
                       padding='valid',
                       strides=1)(left_x_go)
    left_x_go_max = GlobalMaxPooling1D()(left_x_go)  #pool_size=pool_length[i]
    left_x_go_avg = GlobalAveragePooling1D()(
        left_x_go)  #pool_size=pool_length[i]

    right_x_go = Conv1D(filters=NUM_FILTERS,
                        kernel_size=FILTER_LENGTH2,
                        activation='relu',
                        padding='valid',
                        strides=1)(right_input_go)
    right_x_go = Conv1D(filters=NUM_FILTERS * 2,
                        kernel_size=FILTER_LENGTH2,
                        activation='relu',
                        padding='valid',
                        strides=1)(right_x_go)
    right_x_go = Conv1D(filters=NUM_FILTERS * 3,
                        kernel_size=FILTER_LENGTH2,
                        activation='relu',
                        padding='valid',
                        strides=1)(right_x_go)
    right_x_go_max = GlobalMaxPooling1D()(right_x_go)
    right_x_go_avg = GlobalAveragePooling1D()(right_x_go)

    x = Concatenate()(
        [left_x_go_avg, left_x_go_max, right_x_go_avg, right_x_go_max])

    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(512, activation='relu')(x)

    x = Dense(1)(x)
    output = Activation('sigmoid')(x)
    # model = Model([left_input_go, right_input_go], output)
    optimizer = Lookahead(RAdam())

    model = Model([left_input_go, right_input_go], output)
    #     model = multi_gpu_model(model, gpus=2)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
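
# Hypothetical smoke test for build_model (not in the original listing).
# bert_len, w2v_len and project_dim are module-level globals in the source
# script; the values below are placeholders chosen only to make the shapes work.
import numpy as np

bert_len, w2v_len, project_dim = 128, 64, 300
model = build_model()
left = np.random.rand(4, bert_len + w2v_len, project_dim).astype("float32")
right = np.random.rand(4, bert_len + w2v_len, project_dim).astype("float32")
scores = model.predict([left, right])  # (4, 1) sigmoid similarity scores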
Example No. 4
def ensemble_models(input_data: str,
                    test_file=None,
                    models_file=None,
                    ga_file=None,
                    ensemble_method="average",
                    batch_size=64,
                    nb_epoch=100,
                    mod=None,
                    max_x_length=50,
                    scale_para=None,
                    unit="s",
                    out_dir="./",
                    prefix="test",
                    use_radam=False,
                    early_stop_patience=None,
                    add_reverse=False,
                    add_ReduceLROnPlateau=False):
    """
    This function is used to ensemble multiple deep learning models. It can be used for training and testing.
    """

    # print("The number of models:", len(models))

    # test data
    X_test = np.empty(1)
    Y_test = np.empty(1)

    y_pr = []
    score = []

    model_list = dict()

    if ga_file is not None:
        X_train, Y_train, X_test, Y_test, scale_para = data_processing(
            input_data=input_data,
            test_file=test_file,
            mod=mod,
            max_x_length=max_x_length,
            scale_para=scale_para,
            unit=unit,
            out_dir=out_dir,
            add_reverse=add_reverse)
        model_list['dp_model'] = dict()
        model_list['max_x_length'] = X_train.shape[1]
        model_list['aa'] = out_dir + "/aa.tsv"
        ## Useful for new data prediction
        model_list['min_rt'] = scale_para['rt_min']
        model_list['max_rt'] = scale_para['rt_max']
        if add_reverse is True:
            model_list['add_reverse'] = 1
            # integer division: half of the doubled (forward + reversed) length
            model_list['max_x_length'] = model_list['max_x_length'] // 2
        else:
            model_list['add_reverse'] = 0

        print("max_x_length: %s" % (max_x_length))
        # read models from genetic search result configure file
        optimizer_name = dict()

        with open(ga_file, "r") as read_file:
            ga_model_list = json.load(read_file)

        model_folder = os.path.dirname(ga_file)
        models = dict()
        for i in ga_model_list.keys():
            m_file = model_folder + "/" + os.path.basename(
                ga_model_list[i]['model'])
            print("Model file: %s -> %s" % (str(i), m_file))

            with open(m_file, "r") as json_read:
                models[i] = keras.models.model_from_json(json_read.read())

            models[i]._layers[1].batch_input_shape = (None, X_train.shape[1],
                                                      X_train.shape[2])
            optimizer_name[i] = ga_model_list[i]['optimizer_name']

        print("Training ...")
        # For each model, train the model
        for (name, model) in models.items():
            print("Train model:", name)
            # perform sample specific training
            res_map = train_model(input_data=input_data,
                                  test_file=test_file,
                                  batch_size=batch_size,
                                  nb_epoch=nb_epoch,
                                  early_stop_patience=early_stop_patience,
                                  mod=mod,
                                  max_x_length=max_x_length,
                                  scale_para=scale_para,
                                  unit=unit,
                                  out_dir=out_dir,
                                  prefix=str(name),
                                  model=model,
                                  optimizer_name=optimizer_name[name],
                                  use_radam=use_radam,
                                  add_reverse=add_reverse,
                                  add_ReduceLROnPlateau=add_ReduceLROnPlateau)

            ## save the model to a file:
            model_file_name = "model_" + str(name) + ".h5"
            model_file_path = out_dir + "/" + model_file_name
            res_map["model"].save(model_file_path)

            model_list['dp_model'][name] = model_file_path

            del res_map
            gc.collect()
            K.clear_session()
            tf.reset_default_graph()

    else:

        print("Transfer learning ...")

        ## Transfer learning
        with open(models_file, "r") as read_file:
            model_list = json.load(read_file)

        model_folder = os.path.dirname(models_file)
        aa_file = os.path.basename(model_list['aa'])
        aa_file = model_folder + "/" + aa_file

        new_model_list = dict()
        new_model_list['dp_model'] = dict()
        model_i = 1

        ## peptide length check
        peptide_max_length = get_max_length_from_input_data(input_data)
        if peptide_max_length > model_list['max_x_length']:
            print(
                "The max length (%d) in the training data should be <= the length supported by the model %d"
                % (peptide_max_length, model_list['max_x_length']))
            sys.exit()

        for (name, dp_model_file) in model_list['dp_model'].items():
            print("\nDeep learning model:", name)

            X_train, Y_train, X_test, Y_test, scale_para = data_processing(
                input_data=input_data,
                test_file=test_file,
                mod=mod,
                max_x_length=model_list['max_x_length'],
                scale_para=scale_para,
                unit=unit,
                out_dir=out_dir,
                aa_file=aa_file,
                add_reverse=add_reverse,
                random_seed=model_i)

            model_i = model_i + 1
            # keras model evaluation: loss and accuracy
            # load model
            model_name = os.path.basename(dp_model_file)
            model_full_path = model_folder + "/" + model_name

            if use_radam == True:
                model = load_model(model_full_path,
                                   custom_objects={
                                       "Lookahead": Lookahead,
                                       "RAdam": RAdam
                                   })
            else:
                model = load_model(model_full_path)
            #new_model = change_model(model, X_train.shape[1:])
            new_model = model

            print(get_peptide_length_from_model(new_model))
            if "add_reverse" in model_list.keys():
                if model_list['add_reverse'] == 1:
                    if 2 * model_list[
                            'max_x_length'] != get_peptide_length_from_model(
                                new_model):
                        print(
                            "Twice the configured max length (%d) does not match the peptide length supported by the loaded model (%d)"
                            % (model_list['max_x_length'],
                               get_peptide_length_from_model(new_model)))
                else:
                    if model_list[
                            'max_x_length'] != get_peptide_length_from_model(
                                new_model):
                        print(
                            "The configured max length (%d) does not match the peptide length supported by the loaded model (%d)"
                            % (model_list['max_x_length'],
                               get_peptide_length_from_model(new_model)))
            else:
                if model_list['max_x_length'] != get_peptide_length_from_model(
                        new_model):
                    print(
                        "The configured max length (%d) does not match the peptide length supported by the loaded model (%d)"
                        % (model_list['max_x_length'],
                           get_peptide_length_from_model(new_model)))

            print("Perform transfer learning ...")
            n_layers = len(new_model.layers)
            print("The number of layers: %d" % (n_layers))
            '''
            for layer in new_model.layers:
                layer_name = str(layer.name)
                if layer_name.startswith("bidirectional_"):
                    break
                else:
                    layer.trainable = False
                    print("layer (frozen:True): %s" % (layer_name))
            '''

            print(model.optimizer)
            if use_radam == True:
                print("Use optimizer: %s" % ("rectified-adam"))
                new_model.compile(
                    loss='mean_squared_error',
                    ## In this case, we cannot change the learning rate.
                    optimizer=Lookahead(RAdam(), sync_period=5, slow_step=0.5))
                #optimizer=RAdam(),
                #optimizer=Lookahead(RAdam(total_steps=1000, warmup_proportion=0.1, min_lr=1e-5, lr=0.001),
                #                    sync_period=5, slow_step=0.5))
                # optimizer=Adam(lr=0.0001),
                # optimizer=SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),
                #metrics=['mean_squared_error'])
            else:
                print("Use optimizer: %s from saved model" %
                      (model.optimizer.__class__.__name__))
                new_model.compile(
                    loss='mean_squared_error',
                    ## In this case, we cannot change the learning rate.
                    #optimizer=model.optimizer)
                    optimizer=model.optimizer.__class__.__name__)
                #optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=0.01/nb_epoch))
                #optimizer=Adam(lr=0.001))
                #optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True))
                #metrics=['mean_squared_error'])

            print("Used optimizer:")
            print(model.optimizer)
            my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test,
                                       scale_para)
            # Save model
            model_chk_path = out_dir + "/best_model.hdf5"
            mcp = ModelCheckpoint(model_chk_path,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  verbose=1,
                                  mode='min')

            all_callbacks = list()
            all_callbacks.append(my_callbacks)
            all_callbacks.append(mcp)

            if add_ReduceLROnPlateau is True:
                print("Use ReduceLROnPlateau!")
                all_callbacks.append(
                    keras.callbacks.ReduceLROnPlateau(patience=3,
                                                      factor=0.5,
                                                      verbose=1,
                                                      min_lr=0.00001,
                                                      min_delta=0))

            if early_stop_patience is not None:
                print("Use EarlyStopping: %d" % (early_stop_patience))
                all_callbacks.append(
                    EarlyStopping(patience=early_stop_patience, verbose=1))

            #all_callbacks.append(LearningRateScheduler(PolynomialDecay(maxEpochs=nb_epoch, initAlpha=0.001, power=5)))

            ## monitor training information
            # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
            new_model.fit(
                X_train,
                Y_train,
                batch_size=batch_size,
                epochs=nb_epoch,
                validation_data=(X_test, Y_test),
                callbacks=all_callbacks
            )  #, keras.callbacks.ReduceLROnPlateau(patience=3, factor=0.5, verbose=1, min_lr=0.000001)])

            ## get the best model
            if use_radam == True:
                best_model = load_model(model_chk_path,
                                        custom_objects={
                                            "Lookahead": Lookahead,
                                            "RAdam": RAdam
                                        })
            else:
                best_model = load_model(model_chk_path)
            ## save the model to a file:
            model_file_name = "model_" + str(name) + ".h5"
            model_file_path = out_dir + "/" + model_file_name
            best_model.save(model_file_path)

            new_model_list['dp_model'][name] = model_file_path

            gc.collect()
            K.clear_session()
            tf.reset_default_graph()

        new_model_list['max_x_length'] = model_list['max_x_length']
        new_aa_file = out_dir + "/" + os.path.basename(model_list['aa'])
        copyfile(aa_file, new_aa_file)
        new_model_list['aa'] = new_aa_file

        ## Useful for new data prediction
        #new_model_list['min_rt'] = scale_para['rt_min']
        #new_model_list['max_rt'] = scale_para['rt_max']

        model_list = new_model_list

    # save model data
    #file_all_models = open(out_dir + "/all_models.obj", 'wb')
    #pickle.dump(models, file_all_models)
    #file_all_models.close()

    ####################################################################################################################
    print("Ensemble learning ...")

    para = dict()
    para['rt_min'] = scale_para['rt_min']
    para['rt_max'] = scale_para['rt_max']
    para['scaling_method'] = scale_para['scaling_method']

    model_list['rt_min'] = scale_para['rt_min']
    model_list['rt_max'] = scale_para['rt_max']
    model_list['scaling_method'] = scale_para['scaling_method']

    if scale_para['scaling_method'] == "mean_std":
        para['rt_mean'] = scale_para['rt_mean']
        para['rt_std'] = scale_para['rt_std']
        model_list['rt_mean'] = scale_para['rt_mean']
        model_list['rt_std'] = scale_para['rt_std']

    elif scale_para['scaling_method'] == "single_factor":
        para['scaling_factor'] = scale_para['scaling_factor']
        model_list['scaling_factor'] = scale_para['scaling_factor']

    if add_reverse is True:
        model_list['add_reverse'] = 1
        para['add_reverse'] = 1
    else:
        model_list['add_reverse'] = 0
        para['add_reverse'] = 0

    ## save result
    model_json = out_dir + "/model.json"
    with open(model_json, 'w') as f:
        json.dump(model_list, f)

    ## evaluation
    if test_file is not None:
        ensemble_predict(model_json,
                         x=X_test,
                         y=Y_test,
                         para=para,
                         batch_size=batch_size,
                         method=ensemble_method,
                         out_dir=out_dir,
                         prefix="final_eval")
Example No. 5
def run(model, backbone, batchsize, lr, epochs, data_path, data_name, fold,
        labels, partial, illustrations, ngpus, save_model):
    BACKBONE = backbone
    BATCH_SIZE = batchsize
    LR = lr
    EPOCHS = epochs

    DATA_DIR = Path(data_path) / (data_name + ("_FOLD%d" % fold))

    x_train_dir = os.path.join(DATA_DIR, 'train_images')
    y_train_dir = os.path.join(DATA_DIR, 'train_labels')

    x_valid_dir = os.path.join(DATA_DIR, 'val_images')
    y_valid_dir = os.path.join(DATA_DIR, 'val_labels')

    x_test_dir = os.path.join(DATA_DIR, 'test_images')
    y_test_dir = os.path.join(DATA_DIR, 'test_labels')

    preprocess_input = sm.get_preprocessing(BACKBONE)

    all_classes = json.loads(open(DATA_DIR / "meta" / "codes.json").read())
    all_weights = json.loads(open(DATA_DIR / "meta" / "weights.json").read())

    if labels == "txt":
        CLASSES = ['text', 'tabelleninhalt']
        if illustrations:
            CLASSES.append('illustration')
    elif labels == "sep":
        CLASSES = ['tab', 'h', 'v']
    else:
        raise RuntimeError("unknown label type %s" % labels)

    background_w_inv = 0
    for c, w in zip(all_classes, all_weights):
        if c not in CLASSES:
            background_w_inv += 1 / w
    WEIGHTS = [all_weights[all_classes.index(s)]
               for s in CLASSES] + [1 / background_w_inv]

    # reorder for background label.
    #CLASSES = CLASSES[1:]
    #WEIGHTS = WEIGHTS[1:] + [WEIGHTS[0]]

    # define network parameters
    n_classes = 1 if len(CLASSES) == 1 else (
        len(CLASSES) + 1)  # case for binary and multiclass segmentation
    activation = 'sigmoid' if n_classes == 1 else 'softmax'

    print("creating model...")

    #create model
    models = dict(unet=sm.Unet,
                  pspnet=sm.PSPNet,
                  linknet=sm.Linknet,
                  fpn=sm.FPN)
    kwargs = dict()
    if model == "pspnet":
        kwargs['input_shape'] = (768, 512, 3)
    my_model = (models[model])(BACKBONE,
                               classes=n_classes,
                               activation=activation,
                               **kwargs)

    # define optimizer
    #clr = tensorflow_addons.optimizers.TriangularCyclicalLearningRate(
    #    initial_learning_rate=LR / 1000, maximal_learning_rate=LR,
    #    step_size=2000)

    #optim = keras.optimizers.SGD(learning_rate=LR)
    #optim = NovoGrad(100)
    #optim = Lookahead(Adam(2 * 1e-3))  # was 2 * 1e-3
    #optim = Lookahead(RAdam(2 * 1e-3, min_lr=1e-5))
    #optim = Lookahead(SGD(2 * 1e-3))

    # Segmentation models losses can be combined together by '+' and scaled by integer or float factor
    # set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)
    #dice_loss = sm.losses.DiceLoss(class_weights=np.array(WEIGHTS))
    #focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
    #total_loss = dice_loss + (args.focal * focal_loss)
    #total_loss = sm.losses.CategoricalCELoss(class_weights=np.array(WEIGHTS))

    if labels == "txt":
        total_loss = sm.losses.CategoricalCELoss(
            class_weights=np.array(WEIGHTS))
    elif labels == "sep":
        total_loss = sm.losses.DiceLoss(class_weights=np.array(WEIGHTS))
    else:
        raise RuntimeError("unknown label type %s" % labels)

    # actually, total_loss can be imported directly from the library; the code above just shows how to combine and weight losses
    # total_loss = sm.losses.binary_focal_dice_loss  # or sm.losses.categorical_focal_dice_loss

    normed_weights = np.array(WEIGHTS)
    normed_weights /= np.sum(normed_weights)

    metrics = [
        sm.metrics.IOUScore(threshold=0.5, name="iou_score"),
        sm.metrics.Precision(threshold=0.5, name="precision"),
        sm.metrics.Recall(threshold=0.5, name="recall")
    ]

    # Dataset for train images
    train_dataset = Dataset(x_train_dir,
                            y_train_dir,
                            classes=CLASSES,
                            augmentation=get_training_augmentation(),
                            preprocessing=get_preprocessing(preprocess_input),
                            partial=partial)

    # Dataset for validation images
    valid_dataset = Dataset(
        x_valid_dir,
        y_valid_dir,
        classes=CLASSES,
        augmentation=get_validation_augmentation(),
        preprocessing=get_preprocessing(preprocess_input),
    )

    train_dataloader = Dataloader(train_dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True)
    valid_dataloader = Dataloader(valid_dataset, batch_size=1, shuffle=False)

    optim = Lookahead(
        AdamW(
            2 * LR,  # was: 5 * LR
            #beta_1=0.8,
            #eta_min=0.1 / 2,
            use_cosine_annealing=True,
            batch_size=batchsize,
            total_iterations=len(train_dataset) //
            batchsize))  #, slow_step=0.25)

    if ngpus > 1:
        #from keras.utils import multi_gpu_model
        #my_model = multi_gpu_model(my_model, gpus=ngpus)
        my_model = keras.utils.multi_gpu_model(my_model, gpus=ngpus)

    print("compiling model...")

    # compile keras model with the defined optimizer, loss and metrics
    my_model.compile(optim, total_loss, metrics)

    if False:
        import matplotlib.pyplot as plt
        lr_finder = LRFinder(my_model)
        lr_finder.find(train_dataloader,
                       num_iter=len(train_dataloader) * 1,
                       start_lr=1e-3,
                       end_lr=1e-1)
        lr_finder.plot_loss()
        with open("temp.csv", "w") as f:
            for a, b in zip(lr_finder.lrs, lr_finder.losses):
                f.write("%f;%f\n" % (a, b))
        plt.savefig('lr.png')
        sys.exit(0)

    # check shapes for errors
    #assert train_dataloader[0][0].shape == (BATCH_SIZE, 768, 512, 3)
    #assert train_dataloader[0][1].shape == (BATCH_SIZE, 768, 512, n_classes)

    #keras.callbacks.ReduceLROnPlateau(mode='max', monitor='val_f0.5', patience=4, factor=0.5),
    #keras.callbacks.EarlyStopping(mode='max', monitor='val_f0.5', patience=25, min_delta=0.01),
    #OneCycleScheduler(3e-3, pct_start=0.9, start_div=3e-3 / 2e-3, end_div=1),

    save_model_path = None
    if save_model:
        save_model_path = get_tmp_model_path()

    # define callbacks for learning rate scheduling and best checkpoints saving
    callbacks = [
        LogMetricsToSacred(),
        EvaluateMoreMetrics(valid_dataset, n_classes, save_model_path),
        keras.callbacks.TerminateOnNaN()
        #keras.callbacks.CSVLogger(str(model_path / "training.csv"), separator=',', append=False),
    ]

    if False:
        callbacks.append(
            keras.callbacks.ModelCheckpoint(str(model_path),
                                            save_weights_only=True,
                                            save_best_only=True,
                                            mode='max',
                                            monitor='val_matthews'))

    print("starting fit...")

    # train model
    history = my_model.fit_generator(train_dataloader,
                                     steps_per_epoch=len(train_dataloader),
                                     epochs=EPOCHS,
                                     callbacks=callbacks,
                                     validation_data=valid_dataloader,
                                     validation_steps=len(valid_dataloader))

    if save_model_path:
        ex.add_artifact(str(save_model_path))

    # evaluation.

    if False:
        lsd_name = ('pixel_accuracy', 'mean_accuracy', 'mean_IU',
                    'frequency_weighted_IU')
        lsd_value = defaultdict(list)

        for i in range(len(valid_dataset)):

            image, gt_mask = valid_dataset[i]
            image = np.expand_dims(image, axis=0)
            pr_mask = my_model.predict(image)

            metrics = lsd_metrics(
                np.argmax(pr_mask.squeeze(), axis=-1).astype(np.uint8),
                np.argmax(gt_mask.squeeze(), axis=-1).astype(np.uint8),
                n_classes)

            for k in lsd_name:
                lsd_value[k].append(float(getattr(metrics, k)))

        for k, v in lsd_value.items():
            ex.log_scalar("val_" + k, np.mean(np.array(v)))
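
# Hypothetical invocation of run() (not in the original listing). In the source
# project this is normally driven by a sacred experiment config; every value
# below is a placeholder.
run(model="unet", backbone="resnet34", batchsize=8, lr=1e-3, epochs=50,
    data_path="/data/layout", data_name="pages", fold=0,
    labels="txt", partial=1.0, illustrations=False, ngpus=1, save_model=True)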
Example No. 6
parser.add_argument('--batch', help='To do batching or not',
                    required=True)  # True/False
args = vars(parser.parse_args())

from keras.utils import multi_gpu_model
from keras.models import load_model
# model = Inception_Inflated3d(num_frames = n_sequence, classes=3, input_shape=(32,168,64,3))
# p_model = multi_gpu_model(model, gpus=1)
# p_model.load_weights(args['path'])  # load multi-gpu model weights
# old_model = p_model.layers[-2]   #get single GPU model weights
# # it's necessary to save the model before use this single GPU model
# old_model.save("single_gpu.hdf5")
# model.load("single_gpu.hdf5")
# model = load_model(args['path'])
model = load_model(args['path'], compile=False).layers[-2]
model.compile(optimizer=Lookahead(RAdam()),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model = Inception_Inflated3d(num_frames=64, classes=3, input_shape=(64,168,64,1))
# model = model.load_weights(args['path']).layers[-2]

print(args['mode'], "Model loaded")

cap = cv2.VideoCapture(args['video'])
print("Video loaded")

w = int(cap.get(3))
h = int(cap.get(4))
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
print("creating writer")
out = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))
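
# Hedged sketch of a possible per-frame inference loop (not in the original
# listing). n_sequence, the 168x64 frame size (taken from the commented model
# definitions above) and the overlay text are assumptions.
import numpy as np

n_sequence = 32
frame_buffer = []
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # cv2.resize takes (width, height), giving frames of shape (168, 64, 3)
    frame_buffer.append(cv2.resize(frame, (64, 168)) / 255.0)
    if len(frame_buffer) == n_sequence:
        clip = np.expand_dims(np.array(frame_buffer), axis=0)
        probs = model.predict(clip)[0]
        cv2.putText(frame, "class %d" % int(np.argmax(probs)), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        frame_buffer = []
    out.write(frame)
cap.release()
out.release()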
Example No. 7
    def compile_models(self):
        """
		Compile full training model with chosen hyperparameters.
		"""
        if self.hyperparameters[
                "clip_select_flag"] == "norm" and self.hyperparameters[
                    "optimizer"] == "adam":
            self.optimizer = Lookahead(
                keras.optimizers.adam(
                    lr=self.hyperparameters["learning_rate"],
                    beta_1=self.hyperparameters["BETA_1"],
                    beta_2=self.hyperparameters["BETA_2"],
                    decay=self.hyperparameters["lr_decay"],
                    clipnorm=self.hyperparameters["clip_norm_thresh"],
                    amsgrad=self.hyperparameters["ams_grad_flag"]),
                sync_period=self.hyperparameters["sync_lookahead"],
                slow_step=self.hyperparameters["slow_weights_lookahead"])
        elif self.hyperparameters[
                "clip_select_flag"] == "value" and self.hyperparameters[
                    "optimizer"] == "adam":
            self.optimizer = Lookahead(
                keras.optimizers.adam(
                    lr=self.hyperparameters["learning_rate"],
                    beta_1=self.hyperparameters["BETA_1"],
                    beta_2=self.hyperparameters["BETA_2"],
                    decay=self.hyperparameters["lr_decay"],
                    clipvalue=self.hyperparameters["clip_val_thresh"],
                    amsgrad=self.hyperparameters["ams_grad_flag"]),
                sync_period=self.hyperparameters["sync_lookahead"],
                slow_step=self.hyperparameters["slow_weights_lookahead"])
        elif (self.hyperparameters["clip_select_flag"] == "norm"
              or self.hyperparameters["clip_select_flag"]
              == "value") and self.hyperparameters["optimizer"] == "nadam":
            self.optimizer = keras.optimizers.nadam(
                lr=self.hyperparameters["learning_rate"],
                beta_1=self.hyperparameters["BETA_1"],
                beta_2=self.hyperparameters["BETA_2"],
                epsilon=self.hyperparameters["epsilon"],
                schedule_decay=self.hyperparameters["lr_decay"])
        elif self.hyperparameters["optimizer"] == "RAdam":
            self.optimizer = RAdam(
                learning_rate=self.hyperparameters["learning_rate"],
                beta_1=self.hyperparameters["BETA_1"],
                beta_2=self.hyperparameters["BETA_2"],
                epsilon=self.hyperparameters["epsilon"],
                weight_decay=self.hyperparameters["weight_decay_RAdam"],
                amsgrad=self.hyperparameters["ams_grad_flag"],
                total_steps=self.hyperparameters["total_steps_RAdam"],
                warmup_proportion=self.hyperparameters["warmup_RAdam"],
                min_lr=self.hyperparameters["min_lr_RAdam"])
        elif self.hyperparameters["optimizer"] == "Ranger":
            self.optimizer = Lookahead(
                RAdam(learning_rate=self.hyperparameters["learning_rate"],
                      beta_1=self.hyperparameters["BETA_1"],
                      beta_2=self.hyperparameters["BETA_2"],
                      epsilon=self.hyperparameters["epsilon"],
                      weight_decay=self.hyperparameters["weight_decay_RAdam"],
                      amsgrad=self.hyperparameters["ams_grad_flag"],
                      total_steps=self.hyperparameters["total_steps_RAdam"],
                      warmup_proportion=self.hyperparameters["warmup_RAdam"],
                      min_lr=self.hyperparameters["min_lr_RAdam"]),
                sync_period=self.hyperparameters["sync_lookahead"],
                slow_step=self.hyperparameters["slow_weights_lookahead"])
        else:
            print(" Clipping Method OR Optimizer Selected is not available! ")
            print(
                " Please enter a valid string for these parameters: \n Valid Clipping: ['norm', 'value'] \n Valid Optimizers: ['adam', 'nadam', 'RAdam', 'Ranger']"
            )
            sys.exit(1)
        self.lstm_model.compile(optimizer=self.optimizer,
                                loss=self.hyperparameters["loss_fcn"],
                                metrics=[rmse])
print('[DEBUG] lb_classes_', lb.classes_)
print('[DEBUG] trainY', trainY.shape)
print('[DEBUG] testY', testY.shape)

# save label classes to file essences.txt
with open(cfg.output_PATH + "essences.txt", "w+") as f:
    f.write(str(lb.classes_))

# construct the image generator for data augmentation
aug = ImageDataGenerator(width_shift_range=0.1,
                         vertical_flip=True,
                         rotation_range=180,
                         height_shift_range=0.1,
                         horizontal_flip=True)
opt = Lookahead(RAdam(min_lr=cfg.INIT_LR_RADAM))
# construct the set of callbacks
# with RAdam, LearningRateScheduler(poly_decay) was removed from the callbacks
# figPath = cfg.output_PATH + "{}.png".format(os.getpid())
# jsonPath = cfg.output_PATH + "{}.png".format(os.getpid())
# callbacks = [TrainingMonitor(figPath, jsonPath=jsonPath), LearningRateScheduler(poly_decay)]
# construct the set of callbacks
callbacks = [
    EpochCheckpoint(cfg.EPOCH_PATH,
                    every=cfg.EPOCH_EVERY,
                    startAt=cfg.EPOCH_START),
    TrainingMonitor(cfg.FIG_PATH + "{}.png".format(os.getpid()),
                    jsonPath=cfg.JSON_PATH + "{}.json".format(os.getpid()),
                    startAt=cfg.EPOCH_START)
]
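
# Hedged continuation sketch (not in the original listing): compile and fit the
# model with the augmentation generator, optimizer and callbacks defined above.
# model, trainX and testX (alongside trainY/testY) are assumed to be defined
# earlier in the script; batch size and epoch count are placeholders.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
                    validation_data=(testX, testY),
                    steps_per_epoch=len(trainX) // 32,
                    epochs=75,
                    callbacks=callbacks,
                    verbose=1)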