Example #1
    def load_from_json(self, fname):
        # load the exported model; json_tricks restores the numpy arrays,
        # and the context manager closes the file handle
        with open(fname) as f:
            import_data = json_tricks.load(f)
        import_clf = ModifiedNB()
        import_clf.class_count_ = import_data['class_count_']
        import_clf.class_log_prior_ = import_data['class_log_prior_']
        import_clf.classes_ = import_data['classes_']
        import_clf.feature_count_ = import_data['feature_count_']
        import_clf.feature_log_prob_ = import_data['feature_log_prob_']
        self.clf = import_clf

        # load the fps dict vectoriser
        v_fps = DictVectorizer()
        dv = import_data['fps_vectoriser']
        # JSON object keys are always strings, so cast them back to int
        v_fps.vocabulary_ = {int(k): v for k, v in dv['vocabulary_'].items()}
        v_fps.feature_names_ = dv['feature_names_']
        self.v_fps = v_fps

        # load the continuous variables binariser
        try:
            binariser = import_data['binariser']
            kbd = KBinsDiscretizer(n_bins=10,
                                   encode='onehot',
                                   strategy='quantile')
            kbd.n_bins = binariser['n_bins']
            kbd.n_bins_ = binariser['n_bins_']
            kbd.bin_edges_ = np.asarray(
                [np.asarray(x) for x in binariser['bin_edges_']])
            # rebuild the discretiser's internal one-hot encoder by hand
            encoder = OneHotEncoder()
            encoder.categories = binariser['categories']
            encoder._legacy_mode = False
            kbd._encoder = encoder
            self.kbd = kbd
        except Exception:
            # older exports may not include a binariser; leave self.kbd unset
            pass

        # extra parameters
        self.trained = True
        self.con_desc_list = import_data['con_desc_list']
        self.fp_type = import_data['fp_type']
        self.fp_radius = import_data['fp_radius']
        self.informative_cvb = import_data['informative_cvb']
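The matching export routine is not shown in this example. Below is a minimal sketch of a save_to_json counterpart, assuming the same attribute and key names the loader above reads; the method name and structure are hypothetical, not from the original project.

    def save_to_json(self, fname):
        # hypothetical counterpart to load_from_json: writes every key the
        # loader expects; json_tricks serialises the numpy arrays directly
        export_data = {
            'class_count_': self.clf.class_count_,
            'class_log_prior_': self.clf.class_log_prior_,
            'classes_': self.clf.classes_,
            'feature_count_': self.clf.feature_count_,
            'feature_log_prob_': self.clf.feature_log_prob_,
            'fps_vectoriser': {
                'vocabulary_': self.v_fps.vocabulary_,
                'feature_names_': self.v_fps.feature_names_,
            },
            'con_desc_list': self.con_desc_list,
            'fp_type': self.fp_type,
            'fp_radius': self.fp_radius,
            'informative_cvb': self.informative_cvb,
        }
        if getattr(self, 'kbd', None) is not None:
            export_data['binariser'] = {
                'n_bins': self.kbd.n_bins,
                'n_bins_': self.kbd.n_bins_,
                # stored as plain lists; the loader converts them back
                # with np.asarray
                'bin_edges_': [x.tolist() for x in self.kbd.bin_edges_],
                'categories': self.kbd._encoder.categories,
            }
        with open(fname, 'w') as f:
            json_tricks.dump(export_data, f)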
Example #2
# `dataset` is assumed to be a pandas DataFrame loaded earlier (not shown);
# the first 12 columns are the features, column 12 holds the class label
x = dataset.iloc[:, :12].values
y = dataset.iloc[:, 12:13].values

print(len(x))
print(len(y))

from sklearn.preprocessing import StandardScaler

# standardise the features to zero mean and unit variance
sc = StandardScaler()
x1 = sc.fit_transform(x)

from sklearn.preprocessing import OneHotEncoder

# one-hot encode the labels; the sparse result is densified with .toarray()
ohe = OneHotEncoder(categories='auto')
z = ohe.fit_transform(y).toarray()

print(z)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x1, z, test_size=0.1)

import model_archi  # a star-import would not bind the `model_archi` name used below

model = model_archi.build(12, 4)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    # the original snippet is truncated here; the training
                    # arguments below are assumed, not from the source
                    epochs=100,
                    batch_size=32,
                    validation_data=(x_test, y_test))
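
The model_archi module is not included in the snippet. Below is a minimal sketch of what its build function might look like, assuming a Keras Sequential classifier whose sigmoid output layer matches the binary_crossentropy loss compiled above; the hidden-layer sizes are assumptions.

# model_archi.py (hypothetical sketch)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

def build(input_dim, num_classes):
    # a small fully connected network: `input_dim` scaled features in,
    # one sigmoid unit per one-hot class out
    model = Sequential([
        Dense(32, activation='relu', input_shape=(input_dim,)),
        Dense(16, activation='relu'),
        Dense(num_classes, activation='sigmoid'),
    ])
    return model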