Example #1
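All eight snippets below assume a common header of imports that the excerpts omit. A plausible reconstruction from the symbols used (the `init=`, `W_regularizer=`, `border_mode=` and `nb_epoch=` arguments imply the Keras 1.x-era API; `LossModelCheckpoint`, `CapsNet` and `margin_loss` are project-specific, not part of Keras):

# Assumed imports for the snippets below (Keras 1.x-style API).
import keras
from keras import callbacks, optimizers
from keras.models import Model, load_model
from keras.layers import Input, Dense, Dropout, Activation, LSTM
from keras.layers import core, convolutional as conv
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import Nadam
from keras.regularizers import l1, l2
# LossModelCheckpoint (see the sketch after Example #1) and
# CapsNet / margin_loss (Example #6) come from the project itself.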
def mixallRNNmodel(trainX,
                   trainY,
                   valX,
                   valY,
                   physical_R_input,
                   folds,
                   train_time=None):

    if (train_time == 0):
        x = LSTM(1024,
                 init='glorot_normal',
                 activation='relu',
                 recurrent_activation='hard_sigmoid')(physical_R_input)
        # x = LSTM(512,init='glorot_normal',activation='relu',return_sequences=True)(physical_all_input)

        x = Dropout(0.4)(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        physical_R_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        mixallRNNmodel = Model(physical_R_input, physical_R_output)

        optimization = 'Nadam'
        mixallRNNmodel.compile(loss='binary_crossentropy',
                               optimizer=optimization,
                               metrics=[keras.metrics.binary_accuracy])
    else:
        mixallRNNmodel = load_model('model/' + str(folds) + '/model/' +
                                    str(train_time - 1) + 'RNNNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'RNNweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        # NOTE: early_stopping is referenced in the fit() call below but was
        # never defined in this snippet; defined here so the code runs
        # (patience copied from the otherwise-identical Examples #7 and #8).
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=10)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'RNNNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'RNNloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = mixallRNNmodel.fit(
            trainX,
            trainY,
            batch_size=4096,
            nb_epoch=50,
            shuffle=True,
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer],
            class_weight='auto',
            validation_data=(valX, valY))
    return mixallRNNmodel
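`LossModelCheckpoint` is used by every snippet here but is not a standard Keras callback. From its call sites, it checkpoints the best model to `model_file_path` while writing the monitored metric's history to `monitor_file_path` as JSON. A minimal sketch of a callback with that behavior, offered as a guess rather than the project's actual implementation:

import json
from keras.callbacks import Callback

class LossModelCheckpoint(Callback):
    """Sketch: checkpoint the best model and dump the monitored
    metric's per-epoch history to a JSON file."""

    def __init__(self, model_file_path, monitor_file_path, verbose=0,
                 save_best_only=True, monitor='val_loss', mode='min',
                 save_weights_only=False):
        super(LossModelCheckpoint, self).__init__()
        self.model_file_path = model_file_path
        self.monitor_file_path = monitor_file_path
        self.verbose = verbose
        self.save_best_only = save_best_only
        self.monitor = monitor
        self.mode = mode
        self.save_weights_only = save_weights_only
        self.best = float('inf') if mode == 'min' else -float('inf')
        self.history = []

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            return
        self.history.append(float(current))
        with open(self.monitor_file_path, 'w') as f:
            json.dump(self.history, f)
        improved = current < self.best if self.mode == 'min' else current > self.best
        if improved:
            self.best = current
        if improved or not self.save_best_only:
            if self.verbose:
                print('Epoch %d: saving model to %s' % (epoch + 1, self.model_file_path))
            if self.save_weights_only:
                self.model.save_weights(self.model_file_path, overwrite=True)
            else:
                self.model.save(self.model_file_path, overwrite=True)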
Example #2
def HydrophobicityNetwork(trainX,
                          trainY,
                          valX,
                          valY,
                          physical_H_input,
                          folds,
                          train_time=None):

    if (train_time == 0):
        x = core.Flatten()(physical_H_input)
        x = BatchNormalization()(x)

        x = Dense(1024, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)

        x = Dense(512, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.3)(x)

        x = Dense(256, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(128, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        physical_H_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        HydrophobicityNetwork = Model(physical_H_input, physical_H_output)

        # optimization = SGD(lr=0.01, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        HydrophobicityNetwork.compile(loss='binary_crossentropy',
                                      optimizer=optimization,
                                      metrics=[keras.metrics.binary_accuracy])
    else:
        HydrophobicityNetwork = load_model('model/' + str(folds) + '/model/' +
                                           str(train_time - 1) +
                                           'HydrophobicityNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Hydrophobicityweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=50)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'HydrophobicityNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Hydrophobicityloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        fitHistory = HydrophobicityNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return HydrophobicityNetwork
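The `train_time`/`load_model` pattern implies an outer loop: iteration 0 builds the network from scratch, and each later iteration reloads the model checkpointed by the previous one and keeps training. A hypothetical driver (the array names and iteration count are illustrative, not from the source):

# Hypothetical driver loop; data preparation is elided.
for t in range(10):  # iteration count is illustrative
    model = HydrophobicityNetwork(trainX, trainY, valX, valY,
                                  physical_H_input, folds, train_time=t)
    # Iteration t checkpoints its best model to
    # model/<folds>/model/<t>HydrophobicityNetwork.h5,
    # which iteration t+1 reloads via load_model().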
Example #3
def OtherNetwork(trainX,
                 trainY,
                 valX,
                 valY,
                 physical_O_input,
                 folds,
                 train_time=None):

    if (train_time == 0):
        x = core.Flatten()(physical_O_input)
        x = BatchNormalization()(x)

        x = Dense(256, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(128, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(64, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        x = Dense(32, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)

        physical_O_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        OtherNetwork = Model(physical_O_input, physical_O_output)

        optimization = Nadam(lr=0.0001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             schedule_decay=0.004)

        OtherNetwork.compile(loss='binary_crossentropy',
                             optimizer=optimization,
                             metrics=[keras.metrics.binary_accuracy])
    else:
        OtherNetwork = load_model('model/' + str(folds) + '/model/' +
                                  str(train_time - 1) + 'OtherNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Otherweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=200)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'OtherNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Otherloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        fitHistory = OtherNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return OtherNetwork
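Unlike the other snippets, which pass the string 'Nadam' and therefore get Keras's defaults (lr=0.002), this one instantiates Nadam explicitly to use a smaller learning rate. The two forms side by side:

# String form: Keras constructs Nadam with its defaults (lr=0.002).
model.compile(loss='binary_crossentropy', optimizer='Nadam')

# Explicit form: the same optimizer, but with a custom learning rate.
model.compile(loss='binary_crossentropy',
              optimizer=Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                              epsilon=1e-08, schedule_decay=0.004))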
Example #4
def OnehotNetwork(trainX,
                  trainY,
                  valX,
                  valY,
                  Oneofkey_input,
                  folds,
                  train_time=None):

    if (train_time == 0):
        # x = conv.Convolution1D(201, 2, init='glorot_normal', W_regularizer=l1(0), border_mode="same")(Oneofkey_input)
        # x = Dropout(0.4)(x)
        # x = Activation('softsign')(x)

        x = conv.Convolution1D(101,
                               3,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(Oneofkey_input)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(101,
                               5,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(101,
                               7,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = core.Flatten()(x)
        x = BatchNormalization()(x)

        # x = Dense(256, init='glorot_normal', activation='relu')(x)
        # x = Dropout(0.3)(x)

        x = Dense(128, init='glorot_normal', activation="relu")(x)
        x = Dropout(0)(x)

        Oneofkey_output = Dense(2,
                                init='glorot_normal',
                                activation='softmax',
                                W_regularizer=l2(0.001))(x)

        OnehotNetwork = Model(Oneofkey_input, Oneofkey_output)
        optimization = 'Nadam'
        OnehotNetwork.compile(loss='binary_crossentropy',
                              optimizer=optimization,
                              metrics=[keras.metrics.binary_accuracy])
    else:
        OnehotNetwork = load_model('model/' + str(folds) + '/model/' +
                                   str(train_time - 1) + 'OnehotNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Onehotweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=50)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'OnehotNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Onehotloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = OnehotNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return OnehotNetwork
Example #5
def AlphaturnpropensityNetwork(trainX,
                               trainY,
                               valX,
                               valY,
                               physical_A_input,
                               folds,
                               train_time=None):

    if (train_time == 0):
        x = conv.Convolution1D(201,
                               2,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(physical_A_input)
        x = Dropout(0.4)(x)
        x = Activation('softsign')(x)

        x = conv.Convolution1D(151,
                               3,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.3)(x)
        x = Activation('softsign')(x)

        x = conv.Convolution1D(101,
                               5,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.2)(x)
        x = Activation('softsign')(x)

        x = conv.Convolution1D(51,
                               7,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.1)(x)
        x = Activation('softsign')(x)

        x = core.Flatten()(x)
        x = BatchNormalization()(x)
        physical_A_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        AlphaturnpropensityNetwork = Model(physical_A_input, physical_A_output)
        optimization = keras.optimizers.Nadam(lr=0.0005,
                                              beta_1=0.9,
                                              beta_2=0.999,
                                              epsilon=1e-08,
                                              schedule_decay=0.004)
        AlphaturnpropensityNetwork.compile(
            loss='binary_crossentropy',
            optimizer=optimization,
            metrics=[keras.metrics.binary_accuracy])
    else:
        AlphaturnpropensityNetwork = load_model(
            'model/' + str(folds) + '/model/' + str(train_time - 1) +
            'AlphaturnpropensityNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Alphaturnpropensityweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=70)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'AlphaturnpropensityNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Alphaturnpropensityloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        fitHistory = AlphaturnpropensityNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return AlphaturnpropensityNetwork
Example #6
def Capsnet_main(trainX,
                 trainY,
                 valX=None,
                 valY=None,
                 nb_classes=2,
                 nb_epoch=500,
                 earlystop=None,
                 weights=None,
                 compiletimes=0,
                 compilemodels=None,
                 lr=0.001,
                 lrdecay=1,
                 batch_size=500,
                 lam_recon=0.392,
                 routings=3,
                 modeltype=5,
                 class_weight=None,
                 activefun='linear',
                 power=2,
                 predict=False,
                 outputweights=None,
                 monitor_file=None,
                 save_best_only=True,
                 load_average_weight=False):
    print(trainX.shape)
    if len(trainX.shape) > 3:
        trainX.shape = (trainX.shape[0], trainX.shape[2], trainX.shape[3])

    if (valX is not None):
        print(valX.shape)
        if len(valX.shape) > 3:
            valX.shape = (valX.shape[0], valX.shape[2], valX.shape[3])

    # Use early stopping to control nb_epoch; a validation set must be
    # provided (if none is given, one will be selected).
    if earlystop is not None:
        early_stopping = EarlyStopping(monitor='val_loss', patience=earlystop)
        nb_epoch = 10000

    lr_decay = callbacks.LearningRateScheduler(
        schedule=lambda epoch: lr * (lrdecay**epoch))
    if compiletimes == 0:
        model = CapsNet(input_shape=trainX.shape[1:],
                        n_class=nb_classes,
                        routings=routings,
                        modeltype=modeltype)

        if "crossentropy" in modeltype:
            #model.compile(optimizer=optimizers.Adam(lr=lr,epsilon=1e-08),loss=['binary_crossentropy', 'mse'],loss_weights=[1., lam_recon],metrics={'capsnet': 'accuracy'})
            model.compile(optimizer=optimizers.Adam(lr=lr, epsilon=1e-08),
                          loss='binary_crossentropy',
                          metrics=['accuracy'])

        else:
            #model.compile(optimizer=optimizers.Adam(lr=lr,epsilon=1e-08),loss=[margin_loss, 'mse'],loss_weights=[1., lam_recon],metrics={'capsnet': 'accuracy'})
            #if Radam_flag:
            #    model.compile(optimizer=RAdam(),loss=margin_loss,metrics=['accuracy'])
            #else:
            model.compile(optimizer=optimizers.Adam(lr=lr, epsilon=1e-08),
                          loss=margin_loss,
                          metrics=['accuracy'])

    else:
        model = compilemodels

    if (predict is False):
        if (weights is not None and compiletimes == 0):
            print("load weights:" + weights)
            model.load_weights(weights)

        print("##################save_best_only " + str(save_best_only))
        weight_checkpointer = LossModelCheckpoint(
            model_file_path=outputweights + '_iteration' + str(compiletimes),
            monitor_file_path=monitor_file + '_iteration' + str(compiletimes) +
            '.json',
            verbose=1,
            save_best_only=save_best_only,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)

        if valX is not None:
            if (earlystop is None):
                history = model.fit(trainX,
                                    trainY,
                                    batch_size=batch_size,
                                    epochs=nb_epoch,
                                    validation_data=[valX, valY],
                                    class_weight=class_weight,
                                    callbacks=[lr_decay, weight_checkpointer])
            else:
                history = model.fit(
                    trainX,
                    trainY,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    validation_data=[valX, valY],
                    callbacks=[early_stopping, lr_decay, weight_checkpointer],
                    class_weight=class_weight)
        else:
            history = model.fit(trainX,
                                trainY,
                                batch_size=batch_size,
                                epochs=nb_epoch,
                                class_weight=class_weight,
                                callbacks=[lr_decay, weight_checkpointer])

        if load_average_weight:
            if save_best_only:
                last_weights = model.get_weights()
                model.load_weights(
                    outputweights + '_iteration' + str(compiletimes)
                )  # each iteration reloads the best model for the next run
                saved_weights = model.get_weights()
                avg_merged_weights = list()
                for layer in range(len(last_weights)):
                    # average the final-epoch weights with the best saved
                    # weights (0.5 avoids integer division under Python 2)
                    avg_merged_weights.append(
                        0.5 * (last_weights[layer] + saved_weights[layer]))

                model.set_weights(avg_merged_weights)
        else:
            model.load_weights(
                outputweights + '_iteration' + str(compiletimes)
            )  # each iteration reloads the best model for the next run


    return model
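`margin_loss` above is project-specific. Capsule networks conventionally use the margin loss from "Dynamic Routing Between Capsules" (Sabour et al., 2017), so the project's version is presumably along these lines (the constants m+ = 0.9, m- = 0.1 and lambda = 0.5 are the paper's defaults, assumed here):

from keras import backend as K

def margin_loss(y_true, y_pred):
    # Per-class margin loss; assumes y_true is one-hot and y_pred holds
    # capsule lengths in [0, 1]. Constants are the paper's defaults.
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(L, axis=1))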
Example #7
def AlphaturnpropensityNetwork(
        train_physicalXa,
        trainY,
        val_physicalXa,
        valY,
        #pre_train_total_path = 'model/pretrain.h5',
        train_time=None,
        compilemodels=None):

    physical_A_input = Input(shape=(train_physicalXa.shape[1],
                                    train_physicalXa.shape[2]))  #49*118=5782
    early_stopping = EarlyStopping(monitor='val_loss', mode='min', patience=10)

    if (train_time == 0):
        x = conv.Convolution1D(201,
                               2,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(physical_A_input)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(151,
                               3,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.3)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(101,
                               5,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.2)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(51,
                               7,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.1)(x)
        x = Activation('relu')(x)

        x = core.Flatten()(x)
        x = BatchNormalization()(x)
        physical_A_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        AlphaturnpropensityNetwork = Model(physical_A_input, physical_A_output)

        optimization = 'Nadam'
        AlphaturnpropensityNetwork.compile(
            loss='binary_crossentropy',
            optimizer=optimization,
            metrics=[keras.metrics.binary_accuracy])
    else:
        AlphaturnpropensityNetwork = load_model(
            "model/" + str(train_time - 1) +
            'model/AlphaturnpropensityNetwork.h5')

    if (trainY is not None):
        #checkpointer = ModelCheckpoint(filepath="model/"+str(train_time)+'model/AlphaturnpropensityNetwork.h5',verbose=1,save_best_only=True,monitor='val_loss',mode='min')
        weight_checkpointer = ModelCheckpoint(
            filepath="model/" + str(train_time) +
            'modelweight/AlphaturnpropensityNetworkweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path="model/" + str(train_time) +
            'model/AlphaturnpropensityNetwork.h5',
            monitor_file_path="model/loss/" + str(train_time) +
            "Anetloss.json",
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        AfitHistory = AlphaturnpropensityNetwork.fit(
            train_physicalXa,
            trainY,
            batch_size=4096,
            nb_epoch=50,
            shuffle=True,
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer],
            class_weight='auto',
            validation_data=(val_physicalXa, valY))
        AlphaturnpropensityNetwork.save("model/" + str(train_time) +
                                        'model/1AlphaturnpropensityNetwork.h5')

    return AlphaturnpropensityNetwork
Example #8
def OnehotNetwork(
        train_oneofkeyX,
        trainY,
        val_oneofkeyX,
        valY,
        #pre_train_total_path = 'model/pretrain.h5',
        train_time=None,
        compilemodels=None):

    Oneofkey_input = Input(shape=(train_oneofkeyX.shape[1],
                                  train_oneofkeyX.shape[2]))  #49*21=1029
    early_stopping = EarlyStopping(monitor='val_loss', mode='min', patience=10)

    if (train_time == 0):
        x = conv.Convolution1D(201,
                               2,
                               init='glorot_normal',
                               W_regularizer=l1(0),
                               border_mode="same")(Oneofkey_input)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(151,
                               3,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(151,
                               5,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = conv.Convolution1D(101,
                               7,
                               init='glorot_normal',
                               W_regularizer=l2(0),
                               border_mode="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('relu')(x)

        x = core.Flatten()(x)
        x = BatchNormalization()(x)

        x = Dense(256, init='glorot_normal', activation='relu')(x)
        x = Dropout(0.298224)(x)

        x = Dense(128, init='glorot_normal', activation="relu")(x)
        x = Dropout(0)(x)

        Oneofkey_output = Dense(2,
                                init='glorot_normal',
                                activation='softmax',
                                W_regularizer=l2(0.001))(x)

        OnehotNetwork = Model(Oneofkey_input, Oneofkey_output)

        optimization = 'Nadam'
        OnehotNetwork.compile(loss='binary_crossentropy',
                              optimizer=optimization,
                              metrics=[keras.metrics.binary_accuracy])
    else:
        OnehotNetwork = load_model("model/" + str(train_time - 1) +
                                   'model/OnehotNetwork.h5')

    if (trainY is not None):
        #checkpointer = ModelCheckpoint(filepath="model/"+str(train_time)+'model/OnehotNetwork.h5',verbose=1,save_best_only=True,monitor='val_loss',mode='min')
        weight_checkpointer = ModelCheckpoint(
            filepath="model/" + str(train_time) +
            'modelweight/OnehotNetworkweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path="model/" + str(train_time) +
            'model/OnehotNetwork.h5',
            monitor_file_path="model/loss/" + str(train_time) +
            "onehotloss.json",
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        onehotfitHistory = OnehotNetwork.fit(
            train_oneofkeyX,
            trainY,
            batch_size=4096,
            nb_epoch=50,
            shuffle=True,
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer],
            class_weight='auto',
            validation_data=(val_oneofkeyX, valY))
        OnehotNetwork.save("model/" + str(train_time) +
                           'model/1OnehotNetwork.h5')

    return OnehotNetwork