Example #1

# Module-level imports assumed by this snippet (not shown in the original
# listing); Simon, Encoder, and DataGenerator come from the SIMON library.
import os
import time

import numpy as np

from Simon import Simon
from Simon.DataGenerator import DataGenerator
from Simon.Encoder import Encoder


def main(checkpoint, data_count, data_cols, should_train, nb_epoch, null_pct,
         try_reuse_data, batch_size, execution_config):
    maxlen = 20  # maximum number of characters read per cell
    max_cells = 500  # maximum number of cells read per column
    p_threshold = 0.5  # probability threshold for assigning a label

    checkpoint_dir = "pretrained_models/"
    if not os.path.isdir(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    with open('Categories.txt', 'r') as f:
        Categories = f.read().splitlines()

    # orient the user a bit
    print("fixed categories are: ")
    Categories = sorted(Categories)
    print(Categories)

    raw_data, header = DataGenerator.gen_test_data((data_count, data_cols),
                                                   try_reuse_data)
    print(raw_data)

    # transpose the data and normalize it to lowercase unicode
    raw_data = np.char.lower(np.transpose(raw_data).astype('U'))

    # optionally inject nulls into the data
    if null_pct > 0:
        DataGenerator.add_nulls_uniform(raw_data, null_pct)
    config = {}
    if not should_train:
        if execution_config is None:
            raise TypeError("execution_config is required when should_train is False")
        config = Simon({}).load_config(execution_config, checkpoint_dir)
        encoder = config['encoder']
        if checkpoint is None:
            checkpoint = config['checkpoint']
    else:
        encoder = Encoder(categories=Categories)
        encoder.process(raw_data, max_cells)

    # encode the data
    X, y = encoder.encode_data(raw_data, header, maxlen)

    max_cells = encoder.cur_max_cells

    Classifier = Simon(encoder=encoder)

    data = None
    if should_train:
        data = Classifier.setup_test_sets(X, y)
    else:
        # lightweight stand-in object exposing X_test / y_test attributes
        data = type('data_type', (object,), {'X_test': X, 'y_test': y})

    print('Sample chars in X:{}'.format(X[2, 0:10]))
    print('y:{}'.format(y[2]))

    # need to know number of fixed categories to create model
    category_count = y.shape[1]
    print('Number of fixed categories is:')
    print(category_count)

    model = Classifier.generate_model(maxlen, max_cells, category_count)

    Classifier.load_weights(checkpoint, config, model, checkpoint_dir)

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy'])
    if should_train:
        start = time.time()
        history = Classifier.train_model(batch_size, checkpoint_dir, model,
                                         nb_epoch, data)
        end = time.time()
        print("Time for training is %f sec" % (end - start))
        config = {
            'encoder': encoder,
            'checkpoint': Classifier.get_best_checkpoint(checkpoint_dir)
        }
        Classifier.save_config(config, checkpoint_dir)
        Classifier.plot_loss(history)  # comment this out on Docker images

    pred_headers = Classifier.evaluate_model(max_cells, model, data, encoder,
                                             p_threshold)
    print("DEBUG::The predicted headers are:")
    print(pred_headers)
    print("DEBUG::The actual headers are:")
    print(header)
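
A minimal invocation sketch for the function above; the argument values here are illustrative assumptions, not taken from the original listing:

if __name__ == '__main__':
    # hypothetical call: train a fresh model on generated simulated data
    main(checkpoint=None, data_count=100, data_cols=10, should_train=True,
         nb_epoch=5, null_pct=0.1, try_reuse_data=False, batch_size=64,
         execution_config=None)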
Example #2

# Module-level imports assumed by this snippet; the enclosing D3M primitive
# class (with self._volumes and self.hyperparams) and the Inputs/Outputs
# types are not shown in the original listing.
import logging
import os

import numpy as np
import pandas as pd
import tensorflow as tf

from Simon import Simon
from Simon.penny.guesser import guess

logger = logging.getLogger(__name__)

    def _produce_annotations(self, inputs: Inputs) -> Outputs:
        """ generates dataframe with semantic type classifications and classification probabilities
            for each column of original dataframe

        Arguments:
            inputs {Inputs} -- D3M dataframe

        Returns:
            Outputs -- dataframe with two columns: "semantic type classifications" and "probabilities"
                       Each row represents a column in the original dataframe. The column "semantic type
                       classifications" contains a list of all semantic type labels and the column
                       "probabilities" contains a list of the model's confidence in assigning each
                       respective semantic type label
        """

        # load model checkpoint
        checkpoint_dir = (self._volumes["simon_models_1"] +
                          "/simon_models_1/pretrained_models/")
        if self.hyperparams["statistical_classification"]:
            execution_config = "Base.pkl"
            category_list = "/Categories.txt"
        else:
            execution_config = "Base_stat_geo.pkl"
            category_list = "/Categories_base_stat_geo.txt"
        category_path = (self._volumes["simon_models_1"] + "/simon_models_1" +
                         category_list)
        with open(category_path, "r") as f:
            Categories = f.read().splitlines()

        # create model object
        Classifier = Simon(encoder={})
        config = Classifier.load_config(execution_config, checkpoint_dir)
        encoder = config["encoder"]
        checkpoint = config["checkpoint"]
        # the first argument is maxlen: the maximum number of characters
        # read per cell (20, as in Example #1)
        model = Classifier.generate_model(20, self.hyperparams["max_rows"],
                                          len(Categories))
        Classifier.load_weights(checkpoint, None, model, checkpoint_dir)
        model.compile(loss="binary_crossentropy",
                      optimizer="adam",
                      metrics=["binary_accuracy"])

        # prepare data and make predictions
        frame = inputs.copy()
        prepped_data = encoder.encodeDataFrame(frame)
        preds = model.predict_on_batch(tf.constant(prepped_data))
        logger.debug('------------Reverse label encoding------------')
        decoded_preds = encoder.reverse_label_encode(
            preds, self.hyperparams["p_threshold"])

        # apply statistical / ordinal classification if desired
        if self.hyperparams["statistical_classification"]:
            logger.debug(
                "Beginning guessing of categorical/ordinal classifications...")
            raw_data = frame.values
            guesses = [
                guess(raw_data[:, i], for_types="category")
                for i in np.arange(raw_data.shape[1])
            ]

            # the probability assigned to a rule-based statistical / ordinal
            # classification is the minimum probability of the existing
            # classifications for that column
            for i, g in enumerate(guesses):
                if g[0] == "category":
                    if len(decoded_preds[1][i]) == 0:
                        guess_prob = self.hyperparams["p_threshold"]
                    else:
                        guess_prob = min(decoded_preds[1][i])
                    decoded_preds[0][i] += ("categorical",)
                    decoded_preds[1][i].append(guess_prob)
                    # check the predicted labels (decoded_preds[0]), not the
                    # probabilities, for numeric / datetime types
                    if (("int" in decoded_preds[0][i])
                            or ("float" in decoded_preds[0][i])
                            or ("datetime" in decoded_preds[0][i])):
                        decoded_preds[0][i] += ("ordinal",)
                        decoded_preds[1][i].append(guess_prob)
            logger.debug("Done with statistical variable guessing")

        # clear tf session, remove unnecessary files
        Classifier.clear_session()
        if os.path.exists("unencoded_chars.json"):
            os.remove("unencoded_chars.json")

        out_df = pd.DataFrame.from_records(list(decoded_preds)).T
        out_df.columns = ["semantic types", "probabilities"]
        return out_df
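
For orientation, a hedged sketch of how the returned annotations might be consumed; primitive and frame are stand-in names for an instance of the enclosing D3M primitive and its input dataframe:

# hypothetical usage: one row of annotations per column of the input frame
out_df = primitive._produce_annotations(inputs=frame)
for idx, row in out_df.iterrows():
    print(idx, row["semantic types"], row["probabilities"])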