Code Example #1
import sys

from keras.utils import plot_model  # plot_model requires pydot and graphviz

def main():
    # make the project's Classification package importable
    sys.path.insert(0, '/home/osboxes/DeepLearningResearch/Classification')
    import keras_models
    # hyperparameters for the run (unused in this excerpt; presumably
    # consumed later in the full script)
    epochs = 16
    batch_size = 10
    neurons = 50
    optimizer = 'nadam'
    weight_constraint = 5
    dropout_rate = .10

    # build the single-hidden-layer model and save an architecture diagram
    model = keras_models.create_one_layer()
    plot_model(model, to_file='./figures/oneLayer.png', show_shapes=True)
Code Example #2
import sys

from keras.utils import plot_model  # plot_model requires pydot and graphviz

def main():
    # make the project's Classification package importable (Windows path)
    sys.path.insert(
        0,
        'C:/Users/jarre/OneDrive/Documents/Research/Fall2017Git/Classification'
    )
    import keras_models
    # hyperparameters for the run (unused in this excerpt; presumably
    # consumed later in the full script)
    epochs = 16
    batch_size = 10
    neurons = 50
    optimizer = 'nadam'
    weight_constraint = 5
    dropout_rate = .10

    # build the single-hidden-layer model and save an architecture diagram
    model = keras_models.create_one_layer()
    plot_model(model, to_file='./figures/oneLayer.png', show_shapes=True)
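
Both examples above rely on keras_models.create_one_layer, which is not shown
in this excerpt. The following is a minimal hypothetical sketch of such a
builder, inferred from the call sites in Code Example #3 (the parameter names
optimizer, data_width, and neurons appear there; the default values, layer
sizes, and activations are assumptions):

from keras.models import Sequential
from keras.layers import Dense

def create_one_layer(optimizer='nadam', data_width=100, neurons=50):
    # Hypothetical sketch: a one-hidden-layer binary classifier whose
    # signature matches the create_one_layer call sites in Code Example #3.
    model = Sequential()
    model.add(Dense(neurons, input_dim=data_width, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model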
Code Example #3
import timeit

import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedShuffleSplit

# create_one_layer, create_dualInputSimple, create_dualInputLarge, the calc_*
# metric helpers, and save_results are assumed to be defined elsewhere in the
# project (see Code Examples #1 and #2, which import keras_models).

def final_test(args, perm_inputs, feat_inputs, comb_inputs, labels):
    '''
    Performs the final test and validation across each train ratio, using
    hardcoded hyperparameter values established by the grid search.
    '''
    # feature-vector widths for each input representation
    perm_width = len(perm_inputs[0])
    feat_width = len(feat_inputs[0])
    comb_width = len(comb_inputs[0])
    print('perm width: ' + str(perm_width))
    models = args["model"]
    size = 32  # neuron count shared by every hardcoded configuration

    for m in models:
        print(m)
        data = []
        for r in args["train_ratio"]:
            percent = float(r) / 100
            # stratified shuffle split used for cross-validation
            sss = StratifiedShuffleSplit(n_splits=5,
                                         random_state=0,
                                         test_size=1 - percent)
            cm = np.zeros([2, 2], dtype=np.int64)
            train_time = 0.0
            test_time = 0.0
            ir = 0  # input_ratio; only used by the dual-input models
            for train_index, test_index in sss.split(perm_inputs, labels):
                perm_train, perm_test = perm_inputs[train_index], perm_inputs[test_index]
                feat_train, feat_test = feat_inputs[train_index], feat_inputs[test_index]
                comb_train, comb_test = comb_inputs[train_index], comb_inputs[test_index]
                labels_train, labels_test = labels[train_index], labels[test_index]

                if m == "oneLayer_comb":
                    print('oneLayer_comb')
                    model = create_one_layer(optimizer='nadam',
                                             data_width=comb_width,
                                             neurons=size)
                    epoch = 32
                    batch = 32
                    # time training and prediction separately
                    time0 = timeit.default_timer()
                    model.fit(comb_train,
                              labels_train,
                              epochs=epoch,
                              batch_size=batch)
                    time1 = timeit.default_timer()
                    labels_pred = model.predict(comb_test, batch_size=batch)
                    time2 = timeit.default_timer()

                elif m == "oneLayer_perm":
                    print('oneLayer_perm')
                    model = create_one_layer(optimizer='nadam',
                                             data_width=perm_width,
                                             neurons=size)
                    batch = 32
                    epoch = 16
                    time0 = timeit.default_timer()
                    model.fit(perm_train,
                              labels_train,
                              epochs=epoch,
                              batch_size=batch)
                    time1 = timeit.default_timer()
                    print('train time (s): ' + str(time1 - time0))
                    labels_pred = model.predict(perm_test, batch_size=batch)
                    time2 = timeit.default_timer()
                    print('test time (s): ' + str(time2 - time1))

                elif m == "oneLayer_feat":
                    print('oneLayer_feat')
                    model = create_one_layer(optimizer='nadam',
                                             data_width=feat_width,
                                             neurons=size)
                    batch = 16
                    epoch = 32
                    time0 = timeit.default_timer()
                    model.fit(feat_train,
                              labels_train,
                              epochs=epoch,
                              batch_size=batch)
                    time1 = timeit.default_timer()
                    labels_pred = model.predict(feat_test, batch_size=batch)
                    time2 = timeit.default_timer()

                elif m == "dual_simple":
                    print('dual_simple')
                    model = create_dualInputSimple(input_ratio=.125,
                                                   neurons=32,
                                                   perm_width=perm_width,
                                                   feat_width=feat_width)
                    batch = 16
                    epoch = 32
                    ir = .125
                    print(
                        "args: batch=%i, epochs=%i, ir=%f, perm_width=%i, feat_width=%i"
                        % (batch, epoch, ir, perm_width, feat_width))
                    print(type(perm_width))
                    print(type(feat_width))
                    model = create_dualInputSimple(input_ratio=ir, neurons=size, \
                    perm_width=perm_width, feat_width=feat_width)
                    time0 = timeit.default_timer()
                    model.fit([perm_train, feat_train],
                              labels_train,
                              epochs=epoch,
                              batch_size=batch)
                    time1 = timeit.default_timer()
                    labels_pred = model.predict([perm_test, feat_test],
                                                batch_size=batch)
                    time2 = timeit.default_timer()

                elif m == "dual_large":
                    print('dual_large')
                    model = create_dualInputLarge(input_ratio=.125,
                                                  neurons=32,
                                                  perm_width=perm_width,
                                                  feat_width=feat_width)
                    batch = 128
                    epoch = 32
                    ir = .125
                    model = create_dualInputLarge(dropout_rate=.1, neurons=size,\
                    input_ratio=ir, perm_width=perm_width, feat_width=feat_width)
                    time0 = timeit.default_timer()
                    model.fit([perm_train, feat_train],
                              labels_train,
                              epochs=epoch,
                              batch_size=batch)
                    time1 = timeit.default_timer()
                    labels_pred = model.predict([perm_test, feat_test],
                                                batch_size=batch)
                    time2 = timeit.default_timer()

                train_time += time1 - time0
                test_time += time2 - time1
                # threshold sigmoid outputs and accumulate the confusion matrix
                labels_pred = (labels_pred > 0.5)
                cm = cm + confusion_matrix(labels_test, labels_pred)
            acc = calc_accuracy(cm)
            prec = calc_precision(cm)
            rec = calc_recall(cm)
            f1 = calc_f1(prec, rec)
            # average over the 5 cross-validation splits
            avg_train_time = train_time / 5
            avg_test_time = test_time / 5

            keys = ["model_name", "neurons", "train_ratio", "input_ratio",
                    "epochs", "batch_size", "accuracy", "precision", "recall",
                    "f1_score", "avg_train_time", "avg_test_time"]
            vals = [m, size, r, ir, epoch, batch, acc, prec, rec, f1,
                    avg_train_time, avg_test_time]
            data.append(dict(zip(keys, vals)))

        print('saving results for model: ' + str(m))
        save_results(data, m, model, args["save"])
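
final_test (and grid_search below) also depend on the metric helpers
calc_accuracy, calc_precision, calc_recall, and calc_f1, which are not shown.
A minimal sketch, assuming the 2x2 confusion matrix cm follows sklearn's
[[TN, FP], [FN, TP]] layout:

def calc_accuracy(cm):
    # (TN + TP) / all samples
    return (cm[0, 0] + cm[1, 1]) / float(cm.sum())

def calc_precision(cm):
    # TP / (TP + FP), guarding against an empty denominator
    tp, fp = cm[1, 1], cm[0, 1]
    return tp / float(tp + fp) if (tp + fp) else 0.0

def calc_recall(cm):
    # TP / (TP + FN)
    tp, fn = cm[1, 1], cm[1, 0]
    return tp / float(tp + fn) if (tp + fn) else 0.0

def calc_f1(prec, rec):
    # harmonic mean of precision and recall
    return 2 * prec * rec / (prec + rec) if (prec + rec) else 0.0

Likewise, create_dualInputSimple is only called, never defined, in this
excerpt. A hypothetical sketch using the Keras functional API; treating
input_ratio as the share of neurons given to the perm branch is purely an
assumption:

from keras.models import Model
from keras.layers import Input, Dense, concatenate

def create_dualInputSimple(input_ratio=.125, neurons=32,
                           perm_width=100, feat_width=20):
    # Hypothetical sketch of the two-input model used by final_test.
    perm_in = Input(shape=(perm_width,))
    feat_in = Input(shape=(feat_width,))
    perm_hidden = Dense(max(1, int(neurons * input_ratio)),
                        activation='relu')(perm_in)
    feat_hidden = Dense(neurons, activation='relu')(feat_in)
    merged = concatenate([perm_hidden, feat_hidden])
    output = Dense(1, activation='sigmoid')(merged)
    model = Model(inputs=[perm_in, feat_in], outputs=output)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])
    return model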
Code Example #4
def grid_search(args, perm_inputs, feat_inputs, comb_inputs, labels):
    '''
    A modified implementation of the grid-search method in KTFBinClass.py
    that manually iterates through all parameters via nested loops. It is
    written this way so that multi-input neural networks can be searched as
    well. Relies on the same imports and helpers as Code Example #3.
    '''
    perm_width = len(perm_inputs[0])
    feat_width = len(feat_inputs[0])
    comb_width = len(comb_inputs[0])
    input_ratios = args["input_ratio"]
    splits = args["splits"]
    epochs = args["epochs"]
    batch_size = args["batch_size"]
    neurons = args["neurons"]
    modelName = args["model"]
    single = None

    for m in modelName:

        data = []
        if m in ["oneLayer_perm", "oneLayer_feat", "oneLayer_comb"]:
            print('single bool set')
            single = True
        else:
            single = False

        for r in args["train_ratio"]:
            percent = float(r) / 100
            print(percent)
            sss = StratifiedShuffleSplit(n_splits=splits,
                                         random_state=0,
                                         test_size=1 - percent)
            for epoch in epochs:
                for batch in batch_size:
                    for size in neurons:
                        cm = np.zeros([2, 2], dtype=np.int64)

                        if single:
                            print('model: ' + str(m) + ' tr: ' + str(r) +
                                  ' epochs: ' + str(epoch) + ' bs: ' +
                                  str(batch) + ' n: ' + str(size))
                            for train_index, test_index in sss.split(
                                    perm_inputs, labels):
                                perm_train, perm_test = perm_inputs[train_index], perm_inputs[test_index]
                                feat_train, feat_test = feat_inputs[train_index], feat_inputs[test_index]
                                comb_train, comb_test = comb_inputs[train_index], comb_inputs[test_index]
                                labels_train, labels_test = labels[train_index], labels[test_index]

                                if m == "oneLayer_perm":
                                    model = create_one_layer(
                                        optimizer='nadam',
                                        data_width=perm_width,
                                        neurons=size)
                                elif m == "oneLayer_feat":
                                    model = create_one_layer(
                                        optimizer='nadam',
                                        data_width=feat_width,
                                        neurons=size)
                                elif m == "oneLayer_comb":
                                    model = create_one_layer(
                                        optimizer='nadam',
                                        data_width=comb_width,
                                        neurons=size)

                                if m == "oneLayer_perm":
                                    print("single_input: " + str(m))
                                    model.fit(perm_train,
                                              labels_train,
                                              epochs=epoch,
                                              batch_size=batch)
                                    labels_pred = model.predict(
                                        perm_test, batch_size=batch)

                                elif m == "oneLayer_feat":
                                    print("single_input: " + str(m))
                                    model.fit(feat_train,
                                              labels_train,
                                              epochs=epoch,
                                              batch_size=batch)
                                    labels_pred = model.predict(
                                        feat_test, batch_size=batch)

                                elif m == "oneLayer_comb":
                                    print("single_input: " + str(m))
                                    model.fit(comb_train,
                                              labels_train,
                                              epochs=epoch,
                                              batch_size=batch)
                                    labels_pred = model.predict(
                                        comb_test, batch_size=batch)
                                labels_pred = (labels_pred > 0.5)
                                cm = cm + confusion_matrix(
                                    labels_test, labels_pred)
                            acc = calc_accuracy(cm)
                            prec = calc_precision(cm)
                            rec = calc_recall(cm)
                            f1 = calc_f1(prec, rec)
                            ir = 0  # input_ratio does not apply here
                            keys = ["model_name", "neurons", "train_ratio",
                                    "input_ratio", "epochs", "batch_size",
                                    "accuracy", "precision", "recall",
                                    "f1_score"]
                            data.append(dict(zip(keys, [m, size, r, ir, epoch,
                                                        batch, acc, prec, rec,
                                                        f1])))

                        else:
                            # else block added so that input_ratio can be
                            # searched as a parameter for dual-input models
                            print('ENTERED ELSE - MULTI')

                            for ir in input_ratios:
                                # reset the confusion matrix for each input
                                # ratio so counts do not leak across ratios
                                cm = np.zeros([2, 2], dtype=np.int64)
                                print('model: ' + str(m) + ' tr: ' + str(r) +
                                      ' epochs: ' + str(epoch) + ' bs: ' +
                                      str(batch) + ' n: ' + str(size) +
                                      ' ir: ' + str(ir))
                                for train_index, test_index in sss.split(
                                        perm_inputs, labels):
                                    perm_train, perm_test = perm_inputs[train_index], perm_inputs[test_index]
                                    feat_train, feat_test = feat_inputs[train_index], feat_inputs[test_index]
                                    comb_train, comb_test = comb_inputs[train_index], comb_inputs[test_index]
                                    labels_train, labels_test = labels[train_index], labels[test_index]

                                    if m == "dual_simple":
                                        print(
                                            "args: batch=%i, epochs=%i, ir=%f, perm_width=%i, feat_width=%i"
                                            % (batch, epoch, ir, perm_width,
                                               feat_width))
                                        model = create_dualInputSimple(input_ratio=ir, neurons=size, \
                                        perm_width=perm_width, feat_width=feat_width)
                                    elif m == "dual_large":
                                        model = create_dualInputLarge(dropout_rate=.1, neurons=size,\
                                        input_ratio=ir, perm_width=perm_width, feat_width=feat_width)

                                    print("multi_input: " + str(m))
                                    model.fit([perm_train, feat_train],
                                              labels_train,
                                              epochs=epoch,
                                              batch_size=batch)
                                    labels_pred = model.predict(
                                        [perm_test, feat_test],
                                        batch_size=batch)

                                    labels_pred = (labels_pred > 0.5)
                                    cm = cm + confusion_matrix(
                                        labels_test, labels_pred)
                                acc = calc_accuracy(cm)
                                prec = calc_precision(cm)
                                rec = calc_recall(cm)
                                f1 = calc_f1(prec, rec)

                                data.append(dict(zip(["model_name", "neurons", "train_ratio", "input_ratio", \
                                "epochs", "batch_size", "accuracy", "precision", "recall", "f1_score"], \
                                [m, size, r, ir, epoch, batch, acc, prec, rec, f1])))

        print('saving results for model: ' + str(m))
        save_results(data, m, model, False)
    return
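
For reference, a hypothetical invocation showing the shape of args that
final_test and grid_search expect. Every key below is read inside the
functions above; the concrete values are illustrative only:

args = {
    "model": ["oneLayer_perm", "oneLayer_feat", "oneLayer_comb",
              "dual_simple", "dual_large"],
    "train_ratio": [50, 70, 90],   # percent of the data used for training
    "epochs": [16, 32],
    "batch_size": [16, 32],
    "neurons": [32, 50],
    "input_ratio": [.125, .25],    # only used by the dual-input models
    "splits": 5,
    "save": True,                  # consumed by final_test via save_results
}
grid_search(args, perm_inputs, feat_inputs, comb_inputs, labels)
final_test(args, perm_inputs, feat_inputs, comb_inputs, labels)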