Example #1
def cross_validation(ds):
    alg.reset_algorithm([
        svmObj, rfcObj, gbcObj, logObj, knnObj, mlpObj, gnbObj, ensObj,
        apriObj, astaltObj, fib4Obj, annObj
    ])
    reset_dataset(ds)
    c = 0

    if ds.split == 'groupKFold':
        splitMethod = kf.split(ds.X, ds.Y, ds.MRNs.astype(int))
    elif ds.split == 'KFold':
        splitMethod = normalKF.split(ds.X, ds.Y)
    else:
        raise ValueError("Unknown split method: {}".format(ds.split))

    for train_index, test_index in splitMethod:
        print(c)  # current fold index
        preprocessing(ds, train_index, test_index, c)

        # Train and evaluate each classifier on its own selected feature subset
        alg.svm_class(ds.X_tr_imp_scl[c][:, svmObj.features],
                      ds.X_ts_imp_scl[c][:, svmObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], svmObj, c)
        alg.rfc_class(ds.X_tr_imp_scl[c][:, rfcObj.features],
                      ds.X_ts_imp_scl[c][:, rfcObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], rfcObj, c)
        alg.gbc_class(ds.X_tr_imp_scl[c][:, gbcObj.features],
                      ds.X_ts_imp_scl[c][:, gbcObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], gbcObj, c)
        alg.log_class(ds.X_tr_imp_scl[c][:, logObj.features],
                      ds.X_ts_imp_scl[c][:, logObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], logObj, c)
        alg.knn_class(ds.X_tr_imp_scl[c][:, knnObj.features],
                      ds.X_ts_imp_scl[c][:, knnObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], knnObj, c)
        alg.mlp_class(ds.X_tr_imp_scl[c][:, mlpObj.features],
                      ds.X_ts_imp_scl[c][:, mlpObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], mlpObj, c)
        alg.gnb_class(ds.X_tr_imp_scl[c][:, gnbObj.features],
                      ds.X_ts_imp_scl[c][:, gnbObj.features], ds.Y_tr[c],
                      ds.Y_ts[c], gnbObj, c)
        # Note: training labels are divided by 4 here, presumably to scale the
        # class labels into the network's output range
        ann_class(ds.X_tr_imp_scl[c][:, annObj.features],
                  ds.X_ts_imp_scl[c][:, annObj.features], ds.Y_tr[c] / 4,
                  ds.Y_ts[c], annObj, c)
        alg.ens_class(svmObj, rfcObj, gbcObj, logObj, knnObj, mlpObj, gnbObj,
                      ds.Y_ts[c], ensObj, c)
        # Clinical score baselines (APRI, AST/ALT, FIB-4) use the imputed but
        # unscaled test features
        alg.apri_class(ds.X_ts_imp[c][:, apriObj.features], ds.Y_ts[c],
                       apriObj, c)
        alg.astalt_class(ds.X_ts_imp[c][:, astaltObj.features], ds.Y_ts[c],
                         astaltObj, c)
        alg.fib4_class(ds.X_ts_imp[c][:, fib4Obj.features], ds.Y_ts[c],
                       fib4Obj, c)
        c += 1

    # Collect all of the AUROCs across folds at this point
    print_results(algorithmArray)
    print_table(
        algorithmArray, len(list(itertools.chain.from_iterable(ds.ts_indxs))),
        True
    )  # The boolean argument indicates whether uncertainties should be printed
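The kf and normalKF splitter objects are globals not shown above. A minimal sketch of how they could be constructed, assuming they are the scikit-learn GroupKFold and KFold splitters (which match the split(X, Y, groups) and split(X, Y) calls used in the function):

# Sketch (assumption): scikit-learn splitters matching the calls above
from sklearn.model_selection import GroupKFold, KFold

kf = GroupKFold(n_splits=10)        # grouped splits: one MRN never appears
                                    # in both the train and test fold
normalKF = KFold(n_splits=10, shuffle=True, random_state=0)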
Example #2
def find_top_similar_ask1(q, qlist):
    # TF-IDF based approach
    q = data.preprocessing([q])
    vectorizer = TfidfVectorizer()
    # Option 1: compare the input against the whole question bank
    #    x = vectorizer.fit_transform(qlist)
    #    input_vec = vectorizer.transform(q)
    # Option 2: pull candidate indices from the inverted table and compare only
    # against questions sharing at least one character
    index_list = []
    invert_table = load_inverse_table()
    for c in cut(q):
        for i in c:
            if i in invert_table:
                for value in invert_table[i]:
                    index_list.append(int(value))
                # index_list += invert_table[i]
    # deduplicate the candidate indices
    index_list = list(set(index_list))
    qlist = np.asarray(qlist)
    # fit TF-IDF on the candidate questions only
    x = vectorizer.fit_transform(qlist[index_list])
    input_vec = vectorizer.transform(q)
    res = cosine_similarity(input_vec, x)
    # map the best candidate back to its index in the original qlist
    n = index_list[int(np.argmax(res))]
    return n
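load_inverse_table and cut are not shown. A toy sketch of the token-to-question-index inverted table the lookup above assumes; build_inverse_table and tokenize are hypothetical names:

# Sketch (assumption): the structure load_inverse_table() is expected to return
def build_inverse_table(qlist, tokenize):
    # Map each token to the indices of the questions containing it
    invert_table = {}
    for idx, question in enumerate(qlist):
        for token in tokenize(question):
            invert_table.setdefault(token, []).append(idx)
    return invert_table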
Example #3
def gen(target_molecules):
    # Truncate each molecule string to at most 50 characters
    for i in range(len(target_molecules)):
        target_molecules[i] = target_molecules[i][:50]

    # Create preprocessing instance
    pp = preprocessing()

    # Load data from small data set
    X_train, y_train, X_test, y_test = pp.load_data()

    # Remove characters that are not in the model's charset
    for i in range(len(target_molecules)):
        mol = target_molecules[i]
        for char in mol:
            if char not in pp.charset:
                mol = mol.replace(char, '')
                print("Oops. Removing bad char:", char)
        target_molecules[i] = mol

    # Create & load model
    model = nn(X_train, y_train, X_test, y_test)
    model.load(pp)

    # Generate a molecule
    molecules = model.generate(target=target_molecules,
                               preprocessing_instance=pp,
                               hit_rate=20)
    return molecules
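The character filtering above can also be written as a single comprehension. A sketch with the same effect, assuming pp.charset supports membership tests:

# Keep only charset members, then re-apply the 50-character cap
target_molecules = [''.join(c for c in mol if c in pp.charset)[:50]
                    for mol in target_molecules]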
Example #4
def train():
    data_gen_args = dict(rotation_range=90,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.2)
    image_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        **data_gen_args)
    mask_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        **data_gen_args)

    seed = 1
    model = VariationalUnet()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    global_step = tf.Variable(0)

    num_epochs = 50
    batch_size = 4

    image_generator = image_datagen.flow_from_directory(
        'data/membrane/train/images/',
        class_mode=None,
        batch_size=batch_size,
        seed=seed)

    mask_generator = mask_datagen.flow_from_directory(
        'data/membrane/train/masks/',
        class_mode=None,
        batch_size=batch_size,
        seed=seed)

    # Keras directory iterators loop forever, so cap each epoch at one full
    # pass over the image data
    train_generator = zip(image_generator, mask_generator)
    steps_per_epoch = len(image_generator)

    for epoch in range(num_epochs):
        print("Epoch: ", epoch)
        for _ in range(steps_per_epoch):
            img, mask = next(train_generator)
            img, mask = preprocessing(img, mask)

            loss_value, grads, reconstruction, mus_sigmas = grad(
                model, img, mask)
            # tf.keras optimizers take no global_step argument; track the step
            # count in the tf.Variable instead
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            global_step.assign_add(1)

            # `% 1` logs every step; raise the modulus to log less often
            if global_step.numpy() % 1 == 0:
                print("Step: {},         Loss: {}".format(
                    global_step.numpy(),
                    loss(mask, reconstruction, mus_sigmas).numpy()))
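The grad and loss helpers are not shown. A minimal GradientTape sketch consistent with the four-value unpacking in the loop, assuming the model returns a reconstruction and its latent parameters:

# Sketch (assumption): a grad() matching
# loss_value, grads, reconstruction, mus_sigmas = grad(model, img, mask)
import tensorflow as tf

def grad(model, img, mask):
    # Record the forward pass so gradients can be taken afterwards
    with tf.GradientTape() as tape:
        reconstruction, mus_sigmas = model(img, training=True)
        loss_value = loss(mask, reconstruction, mus_sigmas)
    grads = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, grads, reconstruction, mus_sigmas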
Example #5
def find_top_similar_ask2(q, qlist):
    # GloVe based approach
    # Input: question q; returns the index of the most similar stored question
    # 1. Embed the question and the question-bank questions as sentence vectors
    # 2. Filter candidates via the inverted table
    # 3. Compute the similarity between the question and each candidate
    # 4. Pick the candidate with the highest similarity
    q = data.preprocessing([q])
    q = "".join(q)
    emb, vocab = get_glove_data()
    emb = np.asarray(emb)
    # vector for the input question
    q_vec = get_words_vec(q, emb, vocab)
    qlist_vec = []
    i = 0
    for qlist_ in qlist:
        qlist_vec.append(get_words_vec(qlist_, emb, vocab))
        i += 1
        if i % 5000 == 0:
            print(i)
    qlist_vec = np.asarray(qlist_vec)
    # collect candidate indices from the inverted table
    index_list = []
    invert_table = load_inverse_table()
    for c in cut([q]):
        for i in c:
            if i in invert_table:
                for value in invert_table[i]:
                    index_list.append(int(value))
                # index_list += invert_table[i]
    # deduplicate the candidate indices
    index_list = list(set(index_list))
    # Earlier absolute-difference approach, kept for reference: store in res the
    # indices whose vectors differ least from the input question's vector
    #    abs_v = abs(qlist_vec[0])
    #    res = []
    #    for value in qlist_vec[index_list]:
    #        if abs(q_vec - value) < abs_v:
    #            abs_v = abs(q_vec - value)
    #            res.append(qlist_vec[index_list].tolist().index(value))
    values = qlist_vec[index_list]
    res = cosine_similarity(q_vec, values)
    # map the best candidate back to its index in the original qlist
    n = index_list[int(np.argmax(res))]

    return n
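get_words_vec is not shown; a common choice, sketched here as an assumption, is the mean of the sentence's word vectors (whitespace tokenization stands in for the real tokenizer):

# Sketch (assumption): sentence vector as the mean of its words' GloVe vectors
import numpy as np

def get_words_vec(sentence, emb, vocab):
    # Average the GloVe vectors of the in-vocabulary words
    word2idx = {w: i for i, w in enumerate(vocab)}
    vecs = [emb[word2idx[w]] for w in sentence.split() if w in word2idx]
    if not vecs:
        return np.zeros((1, emb.shape[1]))
    return np.mean(vecs, axis=0, keepdims=True)  # 2D, as cosine_similarity expects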
Example #6
def detect_event(n_point_df,
                 columns=['app1'],
                 detect_position=4,
                 median_window=7,
                 N=4,
                 n=10,
                 niter=100,
                 kappa=35,
                 gamma=0.15,
                 state_threshold=70,
                 noise_level=80):

    var_med_an_npdf = preprocessing(n_point_df,
                                    columns=columns,
                                    m=median_window,
                                    S=1,
                                    N=N,
                                    niter=niter,
                                    kappa=kappa,
                                    gamma=gamma)

    cl_steady, cl_transients = find_steady_states(var_med_an_npdf,
                                                  n_point=True,
                                                  columns=columns,
                                                  n=detect_position,
                                                  state_threshold=state_threshold,
                                                  noise_level=noise_level)

    n_point_df = var_med_an_npdf

    # An event is detected whenever any transient segments were found
    detected = not cl_transients.empty

    return cl_transients, detected
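A minimal usage sketch; window_df is a hypothetical pandas DataFrame holding a window of power samples with an 'app1' column:

# Sketch: run detection on one window of samples with the default tuning
transients, detected = detect_event(window_df, columns=['app1'])
if detected:
    print(transients)  # the detected transient segments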
Example #7
if __name__ == '__main__':

    # check for input arguments
    if len(sys.argv) == 1:
        print(
            'No arguments passed. Please specify classification method ("log", "nn" or "linreg").'
        )
        sys.exit()

    arg = sys.argv[1]

    if arg == 'log':

        # initialise features and targets using credit card data
        X, y = data.preprocessing(remove_data=True)

        store_acc_train = []
        store_acc_test = []
        # sweep the learning rate eta and record per-epoch accuracies
        list_of_etas = [1e-4, 1e-3, 1e-2, 1e-1, 1]
        for eta in list_of_etas:
            lr = LogisticRegression(X,
                                    y,
                                    eta=eta,
                                    minibatch_size=100,
                                    epochs=100,
                                    folds=10,
                                    benchmark=False)
            lr.logistic_regression()
            store_acc_train.append(lr.acc_epoch_train)
            store_acc_test.append(lr.acc_epoch_test)
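The recorded accuracies lend themselves to a learning-rate comparison; a sketch, assuming acc_epoch_test holds one accuracy value per epoch:

# Sketch: plot test accuracy per epoch for each learning rate
import matplotlib.pyplot as plt

for eta, acc in zip(list_of_etas, store_acc_test):
    plt.plot(acc, label='eta = {}'.format(eta))
plt.xlabel('epoch')
plt.ylabel('test accuracy')
plt.legend()
plt.show()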