Example #1
import json
import os

def load_model(directory):
    from keras.models import load_model as keras_load_model
    model = keras_load_model(os.path.join(directory, BULK))
    with open(os.path.join(directory, METADATA), "rb") as conf:
        meta = json.load(conf)

    # Attach the metadata dictionary to the model object for later use.
    model.metadata = meta
    return model
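
This loader expects a companion saver that wrote the model plus a JSON metadata sidecar; the BULK and METADATA constants are defined elsewhere, so the filenames below are assumptions. A minimal sketch of such a saver:

import json
import os

BULK = "model.h5"           # assumed value; the real constant is defined elsewhere
METADATA = "metadata.json"  # assumed value; the real constant is defined elsewhere

def save_model(model, directory, meta):
    # Persist the network itself...
    model.save(os.path.join(directory, BULK))
    # ...and the metadata dictionary next to it as a JSON sidecar.
    with open(os.path.join(directory, METADATA), "w") as conf:
        json.dump(meta, conf)

Example #2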
    def __init__(self, teacher_model):
        self.train_model, self.born_again_model = None, None
        self.temperature = args.temperature
        # Load the (already trained) teacher network and freeze it so that
        # distillation only ever updates the student.
        self.teacher_model = keras_load_model(teacher_model)
        for layer in self.teacher_model.layers:
            layer.trainable = False
        self.teacher_model.compile(optimizer="adam",
                                   loss="categorical_crossentropy")
        self.train_model, self.born_again_model = self.prepare()
        self.train_model = convert_gpu_model(self.train_model)
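
In Keras, trainable flags are baked in when compile is called, which is why the snippet above freezes every teacher layer first. A standalone illustration of that freeze-then-compile order (the toy architecture is hypothetical):

from keras.layers import Dense
from keras.models import Sequential

teacher = Sequential([Dense(10, activation="softmax", input_shape=(4,))])
for layer in teacher.layers:
    layer.trainable = False  # freeze so training never updates the teacher
teacher.compile(optimizer="adam", loss="categorical_crossentropy")
print(teacher.trainable_weights)  # [] -- nothing left to train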
Example #3
def insert_deep_learning_model(pipeline_step, file_name):
    # This is where we saved the random_name for this model
    random_name = pipeline_step.model
    # Load the Keras model here; the 5-character extension is stripped from
    # file_name before appending the model's own suffix.
    keras_file_name = file_name[:-5] + random_name + '_keras_deep_learning_model.h5'

    model = keras_load_model(keras_file_name)

    # Return the model so the caller can put it back in place and get
    # predictions without having to load it from disk again.
    return model
Example #4
def load_model(model_path, optimizer_='adam'):
    '''
    Load a Keras model from disk and compile it.
    model_path: Path to the model file
    optimizer_: Name of the optimizer to compile with
    '''
    model = keras_load_model(model_path)
    # keras_load_model already restores the weights; this extra call is
    # redundant (though harmless) since model_path holds the full model.
    model.load_weights(model_path)
    model = compile_model(model, optimizer_)

    return model
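Example #5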
def get_reconstructions(model_type, x_train, x_test, count):
    params = get_model_parameters(model_type, x_train, x_test)

    if exists(params["model_filename"]):
        print("loading model : '%s'" % params["model_filename"])
        m = keras_load_model(params["model_filename"])
    else:
        m = train_nn(params)

    print("reconstruction prediction...")
    Renc = m.predict(params["x_train"][:count, ...])  # reconstruction

    return Renc
Example #6
def load_object(root_folder, obj_descr_type, model_name, dataset,
                refactoring_name):
    base_name = (obj_descr_type + "-" + model_name + "-" + dataset + "-" +
                 refactoring_name.replace(" ", ""))
    if model_name == 'deep-learning' and obj_descr_type == 'model':
        file_name = root_folder + "/" + base_name + ".h5"
        return keras_load_model(file_name,
                                custom_objects={
                                    "binary_precision": binary_precision(),
                                    "binary_recall": binary_recall()
                                })
    else:
        file_name = root_folder + "/" + base_name + ".joblib"
        return load(file_name)
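
Keras can only deserialize symbols it knows about, so any custom metric, loss, or layer the model was saved with has to be mapped back by name through custom_objects, as above. A minimal sketch of the general pattern (my_metric and the file name are hypothetical):

import keras.backend as K
from keras.models import load_model as keras_load_model

def my_metric(y_true, y_pred):
    # Hypothetical custom metric the model was compiled with.
    return K.mean(K.abs(y_true - y_pred))

# Without custom_objects this load raises "Unknown metric function".
model = keras_load_model("model.h5", custom_objects={"my_metric": my_metric})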
Example #7
def predict(model_file, sdf_file, id_col=None):
    from keras.models import load_model as keras_load_model
    import theano
    import trainer
    theano.config.warn.round = False

    model = keras_load_model(model_file)
    config = model.get_config()
    fp_size = config[0]['config']['units']

    data = trainer.read_sdf(sdf_file)
    fps = trainer.generate_fps(data, fp_size=fp_size)
    test_predictions = model.predict(fps)
    return data, test_predictions
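
Indexing config[0] works because older Keras versions returned Sequential.get_config() as a plain list of layer configs, so the first entry's 'units' recovers the fingerprint size the model was trained with. In newer Keras the config is a dict instead; a hedged sketch covering both layouts:

config = model.get_config()
if isinstance(config, list):      # older Keras: [layer_config, ...]
    fp_size = config[0]['config']['units']
else:                             # newer Keras: {'layers': [layer_config, ...], ...}
    fp_size = config['layers'][0]['config']['units']

Example #8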
def load_model():
    # load the pre-trained Keras model (here we are using a model
    # pre-trained on ImageNet and provided by Keras, but you can
    # substitute in your own networks just as easily)
    # load as well the dictionary with labels to match with predictions
    global session
    session = tf.Session()
    set_session(session)

    global model
    model_filepath = "models/export-full-model.hdf5"
    model = keras_load_model(model_filepath)
    model._make_predict_function()

    global idx_to_class
    with open("models/classes.json") as f:
        idx_to_class = json.load(f)
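
This is the TF1-era recipe for serving a Keras model from a web app: one global session created at startup, then re-entered on every request because the framework may dispatch each request on a different thread. A hedged sketch of a matching prediction handler (the helper name and the string-keyed class lookup are assumptions):

def predict_image(image_batch):
    # Re-bind the graph/session that load_model() created, since Keras
    # state is not shared across threads by default under TF1.
    with session.graph.as_default():
        set_session(session)
        probs = model.predict(image_batch)[0]
    top = int(probs.argmax())
    return idx_to_class[str(top)], float(probs[top])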
Example #9
def load_model(model_file, network_config):
    # Import for loading legacy models.
    from keras_contrib.layers import Deconvolution3D  # noqa

    model = keras_load_model(model_file)

    # If necessary, wrap the loaded model to transpose the axes for both
    # inputs and outputs.
    if network_config.transpose:
        inputs = []
        perms = []
        for old_input in model.input_layers:
            input_shape = np.asarray(old_input.input_shape)[[3, 2, 1, 4]]
            new_input = Input(shape=tuple(input_shape),
                              dtype=old_input.input_dtype,
                              name=old_input.name)
            perm = Permute((3, 2, 1, 4),
                           input_shape=tuple(input_shape))(new_input)
            inputs.append(new_input)
            perms.append(perm)

        old_outputs = model(perms)
        if not isinstance(old_outputs, list):
            old_outputs = [old_outputs]

        outputs = []
        for old_output in old_outputs:
            new_output = Permute((3, 2, 1, 4))(old_output)
            outputs.append(new_output)

        new_model = Model(input=inputs, output=outputs)

        # Monkeypatch the save to save just the underlying model.
        func_type = type(model.save)

        old_model = model

        def new_save(_, *args, **kwargs):
            old_model.save(*args, **kwargs)

        new_model.save = func_type(new_save, new_model)

        model = new_model

    return model
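
Permute reorders only the non-batch axes, so (3, 2, 1, 4) swaps the first and third spatial dimensions while leaving channels in place; that is what lets the wrapper above accept transposed inputs. A quick shape check (sizes are illustrative):

from keras.layers import Input, Permute
from keras.models import Model

x = Input(shape=(5, 6, 7, 2))    # (dim1, dim2, dim3, channels)
y = Permute((3, 2, 1, 4))(x)     # swap dim1 and dim3
print(Model(x, y).output_shape)  # (None, 7, 6, 5, 2)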
Example #10
def model_load(root_path):
    """
    Note: this is a loader function dedicated to the OSARA Tool.

    Load a model (keras.models.Sequential format).

    !Required: Function Name is "model_load"

    :type root_path: str
    :rtype: keras.models.Sequential

    >>> root_path = r"C:\cygwin64\home\spc000\python\\new_project\model"
    >>> model = model_load(root_path)
    """
    root_path = Path(root_path)
    model_path = root_path.joinpath("vggnet_model_acc_0.69.hdf5")
    # str() for compatibility with Keras versions that reject Path objects.
    model = keras_load_model(str(model_path))
    return model
Example #11
def load_model():
    # Load the pre-trained Keras model from disk. The original snippet
    # first assembled a Sequential architecture by hand and then
    # immediately overwrote it with the model loaded from 'model.h5',
    # so the hand-built layers were dead code and are omitted here.
    global model
    model = keras_load_model('model.h5')
Example #12
def get_encodings(model_type, x_train, x_test):
    from keras.models import Model

    params = get_model_parameters(model_type, x_train, x_test)

    if params is None:
        m = train_optimal_nn(x_train, x_train, x_test, x_test, params)
    else:
        if exists(params["model_filename"]):
            print("loading model : '%s'" % params["model_filename"])
            m = keras_load_model(params["model_filename"])
        else:
            m = train_nn(params)

    encoder = Model(m.input, m.get_layer('bottleneck').output)

    print("bottleneck prediction...")
    if params is None:
        Zenc = encoder.predict(x_train)
    else:
        Zenc = encoder.predict(params["x_train"])  # bottleneck representation

    print("Zenc.shape : %s" % str(Zenc.shape))
    return Zenc.reshape((Zenc.shape[0], Zenc.shape[-1]))
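
get_layer('bottleneck') only works because the autoencoder was built with an explicitly named layer; the Model(m.input, ...) trick then carves the encoder half out of the trained network. A minimal compatible architecture (layer sizes are illustrative, not taken from the original):

from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(784,))
h = Dense(128, activation="relu")(inp)
z = Dense(32, activation="relu", name="bottleneck")(h)  # named for get_layer
out = Dense(784, activation="sigmoid")(Dense(128, activation="relu")(z))
autoencoder = Model(inp, out)

encoder = Model(autoencoder.input, autoencoder.get_layer("bottleneck").output)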
Example #13
    def load_model(self, device="cpu", optimizer=Adam(lr=1e-4),
                   loss="categorical_crossentropy", metrics=['accuracy']):
        if device == "cpu":
            with tf.device("/cpu:0"):
                if loss == "jaccard":
                    from jaccard_loss import jaccard_distance
                    self.model = keras_load_model(
                        self.model_path,
                        custom_objects={'jaccard_distance': jaccard_distance})
                    self.model.compile(optimizer,
                                       loss=jaccard_distance,
                                       metrics=metrics)
                elif loss == "fancy":
                    from kerasfancyloss import fancy_loss
                    self.model = keras_load_model(
                        self.model_path,
                        custom_objects={'fancy_loss': fancy_loss})
                    self.model.compile(optimizer,
                                       loss=fancy_loss,
                                       metrics=metrics)
                else:
                    self.model = keras_load_model(self.model_path)
                    self.model.compile(optimizer, loss, metrics)
        elif device == "gpu":
            if loss == "jaccard":
                from jaccard_loss import jaccard_distance
                self.model = keras_load_model(
                    self.model_path,
                    custom_objects={'jaccard_distance': jaccard_distance})
                self.model.compile(optimizer,
                                   loss=jaccard_distance,
                                   metrics=metrics)
            elif loss == "fancy":
                from kerasfancyloss import fancy_loss
                self.model = keras_load_model(
                    self.model_path, custom_objects={'fancy_loss': fancy_loss})
                self.model.compile(optimizer, loss=fancy_loss, metrics=metrics)
            else:
                self.model = keras_load_model(self.model_path)
                self.model.compile(optimizer, loss, metrics)
        else:
            print("Device not understood")
            return None
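
The cpu and gpu branches above differ only in the tf.device scope. One way to collapse the duplication, sketched under the assumption that the same three loss variants are wanted (a refactoring suggestion, not the author's code):

from contextlib import nullcontext  # Python 3.7+

def _compile_from_disk(self, optimizer, loss, metrics):
    custom_objects, loss_fn = {}, loss
    if loss == "jaccard":
        from jaccard_loss import jaccard_distance
        custom_objects, loss_fn = {"jaccard_distance": jaccard_distance}, jaccard_distance
    elif loss == "fancy":
        from kerasfancyloss import fancy_loss
        custom_objects, loss_fn = {"fancy_loss": fancy_loss}, fancy_loss
    self.model = keras_load_model(self.model_path, custom_objects=custom_objects)
    self.model.compile(optimizer, loss=loss_fn, metrics=metrics)

# load_model() then reduces to choosing the device scope:
#     scope = tf.device("/cpu:0") if device == "cpu" else nullcontext()
#     with scope:
#         self._compile_from_disk(optimizer, loss, metrics)

Example #14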
def load_trigger_model(model_dir):
    model = keras_load_model(os.path.join(model_dir, 'trigger.hdf'))
    return model
Example #15
def load_model(model_filename):
    return keras_load_model(model_filename)
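Example #16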
    def __init__(self, encoder_name, encoder_file_path):
        if encoder_name == "facenet_keras":
            self.encoder_model = keras_load_model(encoder_file_path)
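Example #17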
def load_model(model_path):
    return keras_load_model(model_path, {
        'DynamicKMaxPooling': DynamicKMaxPooling,
        'KMaxPooling': KMaxPooling
    })
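Example #18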
def load_model(model_path):
    return keras_load_model(model_path)
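Example #19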
def load_model(suffix=""):
    return keras_load_model(FILENAME + suffix + EXTENSION)
Example #20
def basic_run(agent, load_model, architecture, training, optimization, memory,
              policy, testing):
    TRAINED = False

    env_name = "warehouse-v0"
    env = gym.make(env_name)

    # Overwrite the config variables with parse args
    if args.gg_colab is not None:
        training['visualize'], testing["render"] = (args.gg_colab, ) * 2
    if args.do_testing is not None:
        testing["do"] = args.do_testing

    # make the manager available in the environment
    env.manager = manager  # used for metrics logging by the callbacks

    # The model
    nb_actions = env.action_space.n
    input_shape = env.OS.input_shape()
    if load_model is None:
        model = getattr(models, architecture)(input_shape,
                                              nb_actions,
                                              use_bias=False)
    else:
        model = keras_load_model(manager.get_load_path(load_model, 'model.h5'))
        TRAINED = True
    model.build(input_shape)
    print(model.summary())

    # The memory
    memory = getattr(rl_memory, memory["name"])(*memory["args"],
                                                **memory["kwargs"])

    # The policy
    if policy['name'] == 'MaskingMaxBoltzmannQPolicy':
        policy = MaskingMaxBoltzmannQPolicy(*policy["args"],
                                            **policy["kwargs"])
    elif policy['name'] == "LinearAnnealedPolicy":
        policy = LinearAnnealedPolicy(
            getattr(rl_policy, policy["sub_policy"])(), *policy["args"],
            **policy["kwargs"])
    else:
        policy = getattr(rl_policy, policy["name"])(*policy["args"],
                                                    **policy["kwargs"])

    # The agent
    if agent['name'] == 'MaskingDQNAgent':
        dqn = MaskingDQNAgent(model=model,
                              environment=env,
                              nb_actions=nb_actions,
                              memory=memory,
                              policy=policy,
                              *agent["args"],
                              **agent['kwargs'])
    else:
        dqn = agents[agent["name"]](model=model,
                                    nb_actions=nb_actions,
                                    memory=memory,
                                    policy=policy,
                                    *agent["args"],
                                    **agent['kwargs'])
    dqn.compile(getattr(optimizers,
                        optimization["optimizer"])(lr=optimization["lr"]),
                metrics=['mae'])

    if not TRAINED:
        # Training
        history = dqn.fit(env, verbose=1,
                          **training)  #callbacks=env.callbacks,
        # TODO: Debug verbose=2, error with TrainEpisodeLogger

        # Save history
        manager.log_scalars("history",
                            [history.epoch, *history.history.values()],
                            ["epoch", *history.history.keys()])
        manager.log_scalars("reward", history.history["episode_reward"])
        manager.log_scalars("avg_q", history.history["episode_avg_maxq"])
        manager.log_scalars("avg_minq", history.history["episode_avg_minq"])
        manager.log_scalars("n_step", history.history["nb_steps"])
        manager.log_scalars("nb_episode_steps",
                            history.history["nb_episode_steps"])
        manager.log_scalars("epoch", history.epoch)

        # Post-training
        # manager.save(dqn.model,"model")
        dqn.model.save(os.path.join(manager.save_dir, "model.h5"),
                       overwrite=True)


    # Finally, evaluate the agent for the configured number of episodes.
    if testing["do"]:
        print("\nTesting the agent")
        dqn.test(env,
                 nb_episodes=testing["nb_episodes"],
                 visualize=testing["render"],
                 callbacks=env.callbacks)
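Example #21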
    def _load_model(self, filepath_model):
        print('load model:', filepath_model)
        self._model = keras_load_model(filepath_model)
Exemple #22
0
def load_model(use_gmb):
    return keras_load_model(get_model_file_name(use_gmb))
Exemple #23
0
    def run(self,
            epochs=10,
            load_model=False,
            save_model=False,
            train=True,
            evaluate=True,
            plot=True,
            data_prep=True,
            clear_data=False):
        if load_model:
            (train_images, train_labels), (test_images,
                                           test_labels) = load_data()
            if not os.path.exists('%s.h5' % self.model_name):
                print("\nNO MODEL AVAILABLE\n")
                load_model = False
            else:
                print("\nLOADING MODEL...\n")
                self.model = keras_load_model('%s.h5' % self.model_name)

        if clear_data and not load_model:
            while True:
                user_input = input("\nCONFIRM DATA DELETION (y/n): ")
                if user_input == 'y':
                    os.system("sh clear_data.sh")
                    break
                elif user_input == 'n':
                    clear_data = False
                    break

        if data_prep or clear_data:
            print('\n')
            while (True):
                website = input("Scrape Google(0), Flickr(1), or Bing(2)? ")
                if (website == '0'):
                    download_google_images(self.new_height, self.new_width,
                                           load_model)
                    print('\n')
                    break
                elif (website == '1'):
                    download_flickr_photos(self.new_height, self.new_width,
                                           load_model)
                    print('\n')
                    break
                elif (website == '2'):
                    download_bing_photos(self.new_height, self.new_width,
                                         load_model)
                    print('\n')
                    break

        if train and not load_model:
            (train_images, train_labels), (test_images,
                                           test_labels) = load_data()

            train_images = train_images / 255.0
            test_images = test_images / 255.0

            self.model = Sequential()
            self.model.add(
                Conv2D(32, (5, 5),
                       activation='relu',
                       input_shape=(self.new_height, self.new_width, 3)))
            self.model.add(MaxPooling2D(pool_size=(2, 2)))
            self.model.add(Conv2D(32, (5, 5), activation='relu'))
            self.model.add(MaxPooling2D(pool_size=(2, 2)))
            self.model.add(Flatten())
            self.model.add(Dense(1024, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(512, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(256, activation='relu'))
            self.model.add(Dense(2, activation='softmax'))

            start_time = time.time()
            self.model.compile(loss='categorical_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy'])
            hist = self.model.fit(train_images,
                                  train_labels,
                                  epochs=epochs,
                                  validation_data=(test_images, test_labels))
            end_time = time.time() - start_time

            if end_time > 3600:
                print('Total Training Time: %dhr %.1fmin\n\n' %
                      (int(end_time / 3600),
                       ((end_time - int(end_time / 3600) * 3600) / 60)))
            elif end_time > 60:
                print("Total Training Time: %dmin %.2fs\n\n" %
                      ((end_time / 60), (end_time - int(end_time / 60) * 60)))
            else:
                print("Total Training Time: %.2fs\n\n" % end_time)

        if save_model and not load_model:
            self.model.save('%s.h5' % self.model_name)

        if evaluate:
            print("TESTING")
            test_acc = self.model.evaluate(test_images, test_labels)[1]
            print('Test accuracy: %.4f\n' % test_acc)

        if plot and not load_model:
            f = plt.figure()
            f.add_subplot(2, 1, 1)
            plt.plot(hist.history['accuracy'], label='train accuracy')
            plt.plot(hist.history['val_accuracy'], label='val accuracy')
            plt.title('Model Accuracy (top) and Model Loss (bottom)')
            plt.ylabel('Accuracy')
            plt.legend(loc='lower left')

            f.add_subplot(2, 1, 2)
            plt.plot(hist.history['loss'], label='train loss')
            plt.plot(hist.history['val_loss'], label='val loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.ylim([0, 2])
            plt.legend(loc='lower left')

            plt.show()
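Example #24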
def load_trigger_modelfile(filepath):
    return keras_load_model(str(filepath))
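Example #25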
    #return (commandString,commandVals)
    return (commandVals)


if len(sys.argv) != 7:
    print("Error with command line inputs")
    sys.exit(1)  # exit nonzero so callers can detect the usage error
else:
    FILE_NAME = sys.argv[1]
    MODEL_PATH = sys.argv[2]
    OUTPUT_PATH = sys.argv[3]
    LOWER_BOUND = int(sys.argv[4])
    UPPER_BOUND = int(sys.argv[5])
    THREADS = int(sys.argv[6])

model = keras_load_model(MODEL_PATH)
nlf = pd.read_csv('NLF.csv', index_col=0)

conn = sqlite3.connect(OUTPUT_PATH)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS predictions(Sequence TEXT,\
                                                  Offset TEXT,\
                                                  Length TEXT,\
                                                  Protein_ID TEXT,\
                                                  PrecursorMZ float,\
                                                  Retention_Time float,\
                                                  Charge integer,\
                                                  b1 TEXT,\
                                                  b2 TEXT,\
                                                  bn1 TEXT,\
                                                  bn2 TEXT,\
Example #26
    def load_model(self, path):
        """Load Keras model from file."""
        self._model = keras_load_model(path)
Example #27
def load_model(path):
    return keras_load_model(path)
Example #28
    for i in zip(array1, array2):
        print(i[0], " | ", i[1])


if len(sys.argv) != 2:
    print("Error with command line inputs")
    sys.exit(1)  # exit nonzero so callers can detect the usage error
else:
    INPUT_PATH = sys.argv[1]

df = pd.read_pickle(INPUT_PATH + "dfTest.pkl").reindex()

xTest = np.load(INPUT_PATH + "XTest.npy")
yTest = np.load(INPUT_PATH + "YTest.npy")
#model = keras_load_model(INPUT_PATH + "model.h5", custom_objects={'cosine_similarity': cosine_similarity})
model = keras_load_model(INPUT_PATH + "model.h5")

predictions = model.predict(xTest)

ionList = [
    "b1", "b2", "bn1", "bn2", "bo1", "bo2", "y1", "y2", "yn1", "yn2", "yo1",
    "yo2"
]
"""
for i, val in enumerate(predictions):
    printResults(yTest[i], val)
"""

print(xTest.shape)
print(yTest.shape)
print(df.shape)
Example #29
    def fit(self, X, y):
        global keras_imported, KerasRegressor, KerasClassifier, EarlyStopping, ModelCheckpoint, TerminateOnNaN, keras_load_model
        self.model_name = get_name_from_model(self.model)

        X_fit = X

        if self.model_name[:12] == 'DeepLearning' or self.model_name in [
                'BayesianRidge', 'LassoLars', 'OrthogonalMatchingPursuit',
                'ARDRegression', 'Perceptron', 'PassiveAggressiveClassifier',
                'SGDClassifier', 'RidgeClassifier', 'LogisticRegression'
        ]:
            if scipy.sparse.issparse(X_fit):
                X_fit = X_fit.todense()

            if self.model_name[:12] == 'DeepLearning':
                if keras_imported == False:
                    # Suppress some level of logs
                    os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
                    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
                    from keras.callbacks import EarlyStopping, ModelCheckpoint, TerminateOnNaN
                    from keras.models import load_model as keras_load_model
                    from keras.wrappers.scikit_learn import KerasRegressor, KerasClassifier

                    keras_imported = True

                # For Keras, we need to tell it how many input nodes to expect, which is our num_cols
                num_cols = X_fit.shape[1]

                model_params = self.model.get_params()
                del model_params['build_fn']
                # These keys only exist for some configurations; drop them
                # if present.
                model_params.pop('feature_learning', None)
                model_params.pop('num_cols', None)

                if self.type_of_estimator == 'regressor':
                    self.model = KerasRegressor(
                        build_fn=utils_models.make_deep_learning_model,
                        num_cols=num_cols,
                        feature_learning=self.feature_learning,
                        **model_params)
                elif self.type_of_estimator == 'classifier':
                    self.model = KerasClassifier(
                        build_fn=utils_models.make_deep_learning_classifier,
                        num_cols=num_cols,
                        feature_learning=self.feature_learning,
                        **model_params)

        if self.model_name[:12] == 'DeepLearning':
            try:

                if self.is_hp_search == True:
                    patience = 5
                    verbose = 0
                else:
                    patience = 25
                    verbose = 2

                X_fit, y, X_test, y_test = self.get_X_test(X_fit, y)
                try:
                    X_test = X_test.toarray()
                except AttributeError as e:
                    pass
                if not self.is_hp_search:
                    print(
                        '\nWe will stop training early if we have not seen an improvement in validation accuracy in {} epochs'
                        .format(patience))
                    print(
                        'To measure validation accuracy, we will split off a random 10 percent of your training data set'
                    )

                early_stopping = EarlyStopping(monitor='val_loss',
                                               patience=patience,
                                               verbose=verbose)
                terminate_on_nan = TerminateOnNaN()

                now_time = datetime.datetime.now()
                time_string = str(now_time.year) + '_' + str(
                    now_time.month) + '_' + str(now_time.day) + '_' + str(
                        now_time.hour) + '_' + str(now_time.minute)

                temp_file_name = 'tmp_dl_model_checkpoint_' + time_string + str(
                    random.random()) + '.h5'
                model_checkpoint = ModelCheckpoint(temp_file_name,
                                                   monitor='val_loss',
                                                   save_best_only=True,
                                                   mode='min',
                                                   period=1)

                callbacks = [early_stopping, terminate_on_nan]
                if not self.is_hp_search:
                    callbacks.append(model_checkpoint)

                self.model.fit(X_fit,
                               y,
                               callbacks=callbacks,
                               validation_data=(X_test, y_test),
                               verbose=verbose)

                # TODO: give some kind of logging on how the model did here! best epoch, best accuracy, etc.

                if self.is_hp_search is False:
                    self.model = keras_load_model(temp_file_name)

                try:
                    os.remove(temp_file_name)
                except OSError as e:
                    pass
            except KeyboardInterrupt as e:
                print(
                    'Stopping training at this point because we heard a KeyboardInterrupt'
                )
                print(
                    'If the deep learning model is functional at this point, we will output the model in its latest form'
                )
                print(
                    'Note that this feature is an unofficial beta-release feature that is known to fail on occasion'
                )

                if self.is_hp_search is False:
                    self.model = keras_load_model(temp_file_name)
                try:
                    os.remove(temp_file_name)
                except OSError as e:
                    pass

        elif self.model_name[:4] == 'LGBM':
            X_fit = X.toarray()

            X_fit, y, X_test, y_test = self.get_X_test(X_fit, y)

            try:
                X_test = X_test.toarray()
            except AttributeError as e:
                pass

            if self.type_of_estimator == 'regressor':
                eval_metric = 'rmse'
            elif self.type_of_estimator == 'classifier':
                if len(set(y_test)) > 2:
                    eval_metric = 'multi_logloss'
                else:
                    eval_metric = 'binary_logloss'

            verbose = True
            if self.is_hp_search == True:
                verbose = False

            if self.X_test is not None:
                eval_name = 'X_test_the_user_passed_in'
            else:
                eval_name = 'random_holdout_set_from_training_data'

            cat_feature_indices = self.get_categorical_feature_indices()
            if cat_feature_indices is None:
                self.model.fit(X_fit,
                               y,
                               eval_set=[(X_test, y_test)],
                               early_stopping_rounds=100,
                               eval_metric=eval_metric,
                               eval_names=[eval_name],
                               verbose=verbose)
            else:
                self.model.fit(X_fit,
                               y,
                               eval_set=[(X_test, y_test)],
                               early_stopping_rounds=100,
                               eval_metric=eval_metric,
                               eval_names=[eval_name],
                               categorical_feature=cat_feature_indices,
                               verbose=verbose)

        elif self.model_name[:8] == 'CatBoost':
            X_fit = X_fit.toarray()

            if self.type_of_estimator == 'classifier' and len(
                    pd.Series(y).unique()) > 2:
                # TODO: we might have to modify the format of the y values, converting them all to ints, then back again (sklearn has a useful inverse_transform on some preprocessing classes)
                self.model.set_params(loss_function='MultiClass')

            cat_feature_indices = self.get_categorical_feature_indices()

            self.model.fit(X_fit, y, cat_features=cat_feature_indices)

        elif self.model_name[:16] == 'GradientBoosting':
            if not sklearn_version > '0.18.1':
                X_fit = X_fit.toarray()

            patience = 20
            best_val_loss = -10000000000
            num_worse_rounds = 0
            best_model = deepcopy(self.model)
            X_fit, y, X_test, y_test = self.get_X_test(X_fit, y)

            # Add a variable number of trees each time, depending how far into the process we are
            if os.environ.get('is_test_suite', False) == 'True':
                num_iters = list(range(1, 50, 1)) + list(range(
                    50, 100, 2)) + list(range(100, 250, 3))
            else:
                num_iters = list(range(
                    1, 50, 1)) + list(range(50, 100, 2)) + list(
                        range(100, 250, 3)) + list(range(250, 500, 5)) + list(
                            range(500, 1000, 10)) + list(range(
                                1000, 2000, 20)) + list(range(
                                    2000, 10000, 100))
            # TODO: get n_estimators from the model itself, and reduce this list to only those values that come under the value from the model

            try:
                for num_iter in num_iters:
                    warm_start = True
                    if num_iter == 1:
                        warm_start = False

                    self.model.set_params(n_estimators=num_iter,
                                          warm_start=warm_start)
                    self.model.fit(X_fit, y)

                    if self.training_prediction_intervals == True:
                        val_loss = self.model.score(X_test, y_test)
                    else:
                        try:
                            val_loss = self._scorer.score(self, X_test, y_test)
                        except Exception as e:
                            val_loss = self.model.score(X_test, y_test)

                    if val_loss - self.min_step_improvement > best_val_loss:
                        best_val_loss = val_loss
                        num_worse_rounds = 0
                        best_model = deepcopy(self.model)
                    else:
                        num_worse_rounds += 1
                    print(
                        '[' + str(num_iter) +
                        '] random_holdout_set_from_training_data\'s score is: '
                        + str(round(val_loss, 3)))
                    if num_worse_rounds >= patience:
                        break
            except KeyboardInterrupt:
                print(
                    'Heard KeyboardInterrupt. Stopping training, and using the best checkpointed GradientBoosting model'
                )
                pass

            self.model = best_model
            print(
                'The number of estimators that were the best for this training dataset: '
                + str(self.model.get_params()['n_estimators']))
            print('The best score on the holdout set: ' + str(best_val_loss))

        else:
            self.model.fit(X_fit, y)

        if self.X_test is not None:
            del self.X_test
            del self.y_test
        return self
Example #30
    sys.exit(1)  # exit nonzero so callers can detect the usage error
else:
    INPUT_PATH = sys.argv[1]
    PRED_TYPE = 0  #Ms2
    #PRED_TYPE = 1 #Retention Time

df = pd.read_pickle(INPUT_PATH + "dfTest.pkl").reindex()

xTest = np.load(INPUT_PATH + "XTest.npy")

pepOut = open(INPUT_PATH + "prositInput.csv", "w")
prositCompare = open(INPUT_PATH + "prositCompare.csv", "w")
if PRED_TYPE == 0:
    yTest = np.load(INPUT_PATH + "YTestMs2.npy")
    #model = keras_load_model(INPUT_PATH + "model.h5", custom_objects={'cosine_similarity': cosine_similarity})
    model = keras_load_model(INPUT_PATH + "modelMs2.h5")
    predictions = model.predict(xTest)

    ionList = [
        "b1", "b2", "bn1", "bn2", "bo1", "bo2", "y1", "y2", "yn1", "yn2",
        "yo1", "yo2"
    ]

    jsonArray = {}
    tmpPred = {}

    jsonArrayExp = {}
    tmpPredExp = {}

    for i in range(len(yTest)):
        #for i in range(1000):
Example #31
def load_model(model_filename, models_folder=MODELS_FOLDER):
    return keras_load_model(os.path.join(models_folder, model_filename))
Example #32
    def perform_model_inference(self, _path_infer_this_image,
                                _objects_found_info_arr):
        print(
            f"\n\nExecuting model inference on image_to_infer : {_path_infer_this_image}\n"
        )

        ## load the keras pretrained model if not already loaded
        if self.reloaded_yolo_model is None:
            print(f"\n\n    LOADING KERAS MODEL      \n\n")
            saved_model_location = r'/home/rohit/PyWDUbuntu/thesis/saved_keras_model/yolov3_coco80.saved.model'
            self.reloaded_yolo_model = keras_load_model(saved_model_location)

        ## set some parameters for network
        net_h, net_w = 416, 416
        obj_thresh, nms_thresh = 0.5, 0.45
        anchors = [[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119],
                   [10, 13, 16, 30, 33, 23]]
        labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
                "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
                "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
                "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
                "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
                "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
                "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
                "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
                "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
                "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]

        image_to_infer_cv2 = cv2.imread(_path_infer_this_image)
        image_h, image_w, _ = image_to_infer_cv2.shape
        try:
            image_to_infer_preprocessed = self.preprocess_input(
                image_to_infer_cv2, net_h, net_w)
        except Exception as error_inference_preprocess_image:
            print(
                f"\nFATAL ERROR: Problem reading the input file.\nError message: {error_inference_preprocess_image}\nExit RC=400"
            )
            exit(400)

        ## run the prediction
        yolos = self.reloaded_yolo_model.predict(image_to_infer_preprocessed)
        boxes = []

        for i in range(len(yolos)):
            ## decode the output of the network
            boxes += self.decode_netout(yolos[i][0], anchors[i], obj_thresh,
                                        nms_thresh, net_h, net_w)

        ## correct the sizes of the bounding boxes
        self.correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)

        ## suppress non-maximal boxes
        self.do_nms(boxes, nms_thresh)

        ## draw bounding boxes into the image
        self.draw_boxes(image_to_infer_cv2, boxes, labels, obj_thresh,
                        _objects_found_info_arr)

        ## save the image as an intermediate file -- revisit later whether
        ## returning it (instead of writing to disk) is possible
        cv2.imwrite(r'./intermediate_file_inferenece_image.jpg',
                    image_to_infer_cv2.astype('uint8'))
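
The preprocess_input helper is defined elsewhere in the class; for YOLOv3 it conventionally letterboxes the frame to the 416x416 network size and scales pixels to [0, 1]. A hedged sketch of that conventional preprocessing (not necessarily the author's exact implementation):

import cv2
import numpy as np

def preprocess_input_sketch(image, net_h, net_w):
    # Resize with the aspect ratio preserved, padding with gray (letterbox).
    h, w, _ = image.shape
    scale = min(net_w / w, net_h / h)
    new_w, new_h = int(w * scale), int(h * scale)
    resized = cv2.resize(image[:, :, ::-1] / 255.0, (new_w, new_h))
    boxed = np.full((net_h, net_w, 3), 0.5)
    y0, x0 = (net_h - new_h) // 2, (net_w - new_w) // 2
    boxed[y0:y0 + new_h, x0:x0 + new_w, :] = resized
    return np.expand_dims(boxed, 0)  # add the batch dimension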