Example #1
import json

import nltk
import numpy as np
import pandas as pd
from Simon import Simon  # assumes the Simon package exposes its classifier class at the top level


def main(datapath, email_index, execution_config, DEBUG):

    # set important parameters
    maxlen = 20
    max_cells = 500
    checkpoint_dir = "pretrained_models/"
    with open(checkpoint_dir + 'Categories_base.txt', 'r') as f:
        Categories = f.read().splitlines()
    category_count = len(Categories)

    # load specified execution configuration
    if execution_config is None:
        raise TypeError("execution_config must be specified")
    Classifier = Simon(encoder={})  # dummy text classifier
    config = Classifier.load_config(execution_config, checkpoint_dir)
    encoder = config['encoder']
    intermediate_model = Classifier.generate_feature_model(maxlen,
                                                           max_cells,
                                                           category_count,
                                                           checkpoint_dir,
                                                           config,
                                                           DEBUG=DEBUG)

    # load sample email
    with open(datapath) as data_file:
        emails = data_file.readlines()
    sample_email = json.loads(emails[int(email_index)])['body']
    if DEBUG:
        print('DEBUG::sample email:')
        print(sample_email)
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    sample_email_sentence = tokenizer.tokenize(sample_email)
    sample_email_sentence = [elem[-maxlen:]
                             for elem in sample_email_sentence]  # truncate
    all_email_df = pd.DataFrame(sample_email_sentence, columns=['Email 0'])
    if DEBUG:
        print('DEBUG::the final shape is:')
        print(all_email_df.shape)
    all_email_df = all_email_df.astype(str)
    raw_data = np.asarray(
        all_email_df.iloc[:max_cells, :])  # truncate to max_cells (.iloc replaces the removed .ix)
    raw_data = np.char.lower(np.transpose(raw_data).astype('U'))

    # encode data
    X = encoder.x_encode(raw_data, maxlen)

    # generate features for email
    y = intermediate_model.predict(X)
    # discard empty column edge case
    y[np.all(all_email_df.isnull(), axis=0)] = 0

    # print and return result
    print('\n128-d Simon Feature Vector:\n')
    print(y[0])
    return y[0]
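
For orientation, here is a minimal sketch of how this function might be invoked. The data file name and email index are placeholders, and the execution config name is borrowed from Example #3 below rather than guaranteed to match this checkpoint directory.

if __name__ == "__main__":
    # placeholder arguments for illustration only
    features = main(datapath="emails.jsonl",
                    email_index=0,
                    execution_config="Base.pkl",
                    DEBUG=True)
    print(features.shape)  # a 128-d Simon feature vector, per the print above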
Example #2
#print("Ten Spam emails after Processing (in DataFrame form) are:")
#print((SpamEmails[:10]))
print("Spam email dataframe after Processing shape:")
print(SpamEmails.shape)

# orient the user a bit
with open('pretrained_models/Categories.txt', 'r') as f:
    Categories = f.read().splitlines()
print("former categories are: ")
Categories = sorted(Categories)
print(Categories)
category_count_prior = len(Categories)

# Load pretrained model via specified execution configuration
Classifier = Simon(encoder={})  # dummy text classifier
config = Classifier.load_config(execution_config, checkpoint_dir)
encoder = config['encoder']
checkpoint = config['checkpoint']

# Encode labels and data
Categories = ['spam', 'notspam']
category_count = len(Categories)
encoder.categories = Categories
header = [['spam']] * Nsamp
header.extend([['notspam']] * Nsamp)

#print(header)
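
The `header` list pairs every sample with its label. The sketch below is a hypothetical helper (plain NumPy, not part of the Simon API) showing one way such per-sample label lists map onto a binary label matrix for training.

import numpy as np

def labels_to_matrix(label_lists, categories):
    # hypothetical helper: multi-hot encode each sample's label list
    matrix = np.zeros((len(label_lists), len(categories)), dtype=int)
    for row, labels in enumerate(label_lists):
        for label in labels:
            matrix[row, categories.index(label)] = 1
    return matrix

# y_labels = labels_to_matrix(header, Categories)  # shape: (2 * Nsamp, 2)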
Example #3
    def _produce_annotations(self, inputs: Inputs) -> Outputs:
        """ generates dataframe with semantic type classifications and classification probabilities
            for each column of original dataframe

        Arguments:
            inputs {Inputs} -- D3M dataframe

        Returns:
            Outputs -- dataframe with two columns: "semantic type classifications" and "probabilities"
                       Each row represents a column in the original dataframe. The column "semantic type
                       classifications" contains a list of all semantic type labels and the column
                       "probabilities" contains a list of the model's confidence in assigning each
                       respective semantic type label
        """

        # load model checkpoint
        checkpoint_dir = (self._volumes["simon_models_1"] +
                          "/simon_models_1/pretrained_models/")
        if self.hyperparams["statistical_classification"]:
            execution_config = "Base.pkl"
            category_list = "/Categories.txt"
        else:
            execution_config = "Base_stat_geo.pkl"
            category_list = "/Categories_base_stat_geo.txt"
        with open(
                self._volumes["simon_models_1"] + "/simon_models_1" +
                category_list, "r") as f:
            Categories = f.read().splitlines()

        # create model object
        Classifier = Simon(encoder={})
        config = Classifier.load_config(execution_config, checkpoint_dir)
        encoder = config["encoder"]
        checkpoint = config["checkpoint"]
        model = Classifier.generate_model(20, self.hyperparams["max_rows"],
                                          len(Categories))
        Classifier.load_weights(checkpoint, None, model, checkpoint_dir)
        model.compile(loss="binary_crossentropy",
                      optimizer="adam",
                      metrics=["binary_accuracy"])

        # prepare data and make predictions
        frame = inputs.copy()
        prepped_data = encoder.encodeDataFrame(frame)
        preds = model.predict_on_batch(tf.constant(prepped_data))
        logger.debug('------------Reverse label encoding------------')
        decoded_preds = encoder.reverse_label_encode(
            preds, self.hyperparams["p_threshold"])

        # apply statistical / ordinal classification if desired
        if self.hyperparams["statistical_classification"]:
            logger.debug(
                "Beginning Guessing categorical/ordinal classifications...")
            raw_data = frame.values
            guesses = [
                guess(raw_data[:, i], for_types="category")
                for i in np.arange(raw_data.shape[1])
            ]

            # probability of rule-based statistical / ordinal classifications = min probability of existing classifications
            for i, g in enumerate(guesses):
                if g[0] == "category":
                    if len(decoded_preds[1][i]) == 0:
                        guess_prob = self.hyperparams['p_threshold']
                    else:
                        guess_prob = min(decoded_preds[1][i])
                    decoded_preds[0][i] += ("categorical", )
                    decoded_preds[1][i].append(guess_prob)
                    if (("int" in decoded_preds[1][i])
                            or ("float" in decoded_preds[1][i])
                            or ("datetime" in decoded_preds[1][i])):
                        decoded_preds[0][i] += ("ordinal", )
                        decoded_preds[1][i].append(guess_prob)
            logger.debug("Done with statistical variable guessing")

        # clear tf session, remove unnecessary files
        Classifier.clear_session()
        os.remove('unencoded_chars.json')

        out_df = pd.DataFrame.from_records(list(decoded_preds)).T
        out_df.columns = ["semantic types", "probabilities"]
        return out_df
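
A brief, hypothetical sketch of reading the returned annotations back against the input columns, written as if `inputs` and `out_df` were still in scope (e.g. just before the return above); it is illustrative only.

for name, (types, probs) in zip(inputs.columns, out_df.itertuples(index=False)):
    # each row pairs a column's semantic type labels with their probabilities
    print(f"{name}: {list(zip(types, probs))}")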