Code example #1
0
def main():
    """Train a small dense classifier on data_for_real1.txt and plot its
    training/validation accuracy curves.

    The last 101 rows of the file are held out; rows [-101:-1] form the
    test split and everything before them is used for training.
    """
    data = np.loadtxt("data_for_real1.txt", delimiter=" ")
    train_x, train_y = data[0:-101, 0:2], data[0:-101, 2:5]
    test_x, test_y = data[-101:-1, 0:2], data[-101:-1, 2:5]

    epochs, batch_size = 30, 20
    # Pool of activation functions used by the hidden layers.
    activations = ['relu', 'tanh', 'sigmoid']
    n_outputs = 3

    # Functional-API model: 2 inputs -> 100 -> 40 -> 30 -> 20 -> 3 outputs.
    inp = Input(shape=(2, ))
    hidden = Dense(100, activation=activations[0])(inp)
    for width, act in ((40, activations[1]),
                       (30, activations[1]),
                       (20, activations[2])):
        hidden = Dense(width, activation=act)(hidden)
    out = Dense(n_outputs, activation='sigmoid')(hidden)

    model = Model(inputs=inp, outputs=out)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Project-local callbacks that record per-epoch histories.
    acc_his = MyCallbacks.AccHistoryEpochTest()
    loss_his_epoch = MyCallbacks.LossHistoryEpoch()
    val_loss_his = MyCallbacks.ValLossHistory()
    # NOTE(review): this is a second ValLossHistory, duplicating val_loss_his;
    # confirm a (train-)LossHistory callback wasn't intended here.
    loss_his = MyCallbacks.ValLossHistory()
    all_measurements = MyCallbacks.LayersEmbeddingAllMeasurementsThreaded(
        epochs, 299, batch_size)

    model.fit(train_x,
              train_y,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(test_x, test_y),
              callbacks=[acc_his, val_loss_his, loss_his_epoch, loss_his,
                         all_measurements],
              shuffle=True)

    # Report overfitting statistics and the estimated convergence epoch
    # (window size 4) computed by the project helper module.
    print(
        hp.overfitting_all_values(
            loss_his_epoch.losses, val_loss_his.losses,
            hp.convergence_of_NN_val_loss(loss_his.losses, 4)))
    print(hp.convergence_of_NN_val_loss(loss_his.losses, 4))

    # Blue = training accuracy, red = validation accuracy.
    plt.plot(acc_his.losses, 'b-', acc_his.losses_val, 'r-')
    plt.show()
Code example #2
0
def loss_nn_dense(args, x, y, x_test, y_test, act_fce, loss, optimizer, batch_size):
    """Build and train the dense network encoded by ``args`` and summarise it.

    ``args`` holds one (units, activation-index) pair per hidden layer,
    followed by the activation index of the output layer; activation indices
    select from ``act_fce``. Trains for up to 300 epochs with early stopping
    on validation loss.

    Returns:
        [min_val_loss, max_val_acc, max_acc,
         train/val accuracy gap at the convergence epoch, convergence epoch].
    """
    n_hidden = int(len(args) / 2)
    in_dim, out_dim = x.shape[1], y.shape[1]

    model = Sequential()
    for idx in range(n_hidden):
        units = int(args[idx * 2])
        act = act_fce[int(args[idx * 2 + 1])]
        # Only the first layer declares the input shape.
        extra = {'input_shape': (in_dim,)} if idx == 0 else {}
        model.add(Dense(units, activation=act, **extra))
    # Output layer; its activation index is the final entry of args.
    model.add(Dense(out_dim, activation=act_fce[int(args[-1])]))
    # Loss and optimizer are caller-supplied.
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    history = MyCallbacks.AccHistoryEpochTest()
    stopper = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=80,
                            verbose=0, mode='min')
    # Batch size is also caller-supplied.
    model.fit(x, y, epochs=300, batch_size=batch_size,
              validation_data=(x_test, y_test),
              callbacks=[history, stopper], shuffle=True)

    # Epoch at which validation loss is judged converged (window of 4);
    # clamp 0 to 1 so the index arithmetic below stays valid.
    conv_epoch = hp.convergence_of_NN_val_loss(history.losses_val_losses, 4)
    if conv_epoch == 0:
        conv_epoch = 1
    overfit_gap = (history.losses[conv_epoch - 1]
                   - history.losses_val[conv_epoch - 1])

    # Best observed validation loss plus the accuracy extremes.
    return [min(history.losses_val_losses),
            max(history.losses_val),
            max(history.losses),
            overfit_gap,
            conv_epoch]
Code example #3
0
def loss_nn_dense(args, act_fce, x, y, x_test, y_test, loss_fce, optimizer):
    """Train the dense network encoded by ``args`` for a single epoch and
    return the minimum validation loss recorded by the history callback.

    ``args`` is (units, activation-index) pairs per hidden layer plus a final
    activation index for the output layer; indices select from ``act_fce``.
    """
    print(args)
    n_layers = int(len(args) / 2)
    n_in, n_out = x.shape[1], y.shape[1]

    model = Sequential()
    for idx in range(n_layers):
        units = int(args[idx * 2])
        act = act_fce[int(args[idx * 2 + 1])]
        if idx:
            model.add(Dense(units, activation=act))
        else:
            # First layer fixes the network's input dimensionality.
            model.add(Dense(units, activation=act, input_shape=(n_in, )))
    model.add(Dense(n_out, activation=act_fce[int(args[-1])]))
    model.compile(loss=loss_fce, optimizer=optimizer, metrics=['accuracy'])

    tracker = MyCallbacks.AccHistoryEpochTest()
    model.fit(x, y,
              epochs=1,
              batch_size=16,
              validation_data=(x_test, y_test),
              callbacks=[tracker],
              shuffle=True)

    return min(tracker.losses_val_losses)
Code example #4
0
def loss_nn_dense(args):
    """Train a dense net described by ``args`` on the module-level dataset
    and return the minimum validation loss.

    Relies on module globals: ``c`` (activation names), ``x``/``y`` (training
    data) and ``x_test``/``y_test`` (validation data). ``args`` is
    (units, activation-index) pairs, one pair per hidden layer.
    """
    n_hidden = int(len(args) / 2)
    model = Sequential()
    for idx in range(n_hidden):
        units, act = int(args[idx * 2]), c[int(args[idx * 2 + 1])]
        if idx == 0:
            # First layer also declares the fixed 2-feature input.
            model.add(Dense(units, activation=act, input_shape=(2,)))
        else:
            model.add(Dense(units, activation=act))
    # Fixed 3-way tanh output trained with categorical cross-entropy.
    model.add(Dense(3, activation='tanh'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    tracker = MyCallbacks.AccHistoryEpochTest()
    model.fit(x, y,
              epochs=200,
              batch_size=16,
              validation_data=(x_test, y_test),
              callbacks=[tracker],
              shuffle=True)

    return min(tracker.losses_val_losses)
                # --- Fragment of a larger training loop; the enclosing
                # definition and the names layers_val, c, num_of_ouputs, X, Y,
                # x_test, y_test, conv_epoch, diff_of_over_fitting_at_conv and
                # max_acc are defined outside this chunk. ---
                if layer == 0:
                    # First layer also declares the fixed 2-feature input.
                    model.add(
                        Dense(int(layers_val[layer][0]),
                              activation=c[int(layers_val[layer][1])],
                              input_shape=(2, )))
                else:
                    model.add(
                        Dense(int(layers_val[layer][0]),
                              activation=c[int(layers_val[layer][1])]))

            model.add(Dense(num_of_ouputs, activation='sigmoid'))
            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

            acc_his = MyCallbacks.AccHistoryEpochTest()

            model.fit(X,
                      Y,
                      epochs=200,
                      batch_size=16,
                      validation_data=(x_test, y_test),
                      callbacks=[acc_his],
                      shuffle=True)
            # Data Calculation
            conv_epoch.append(
                hp.convergence_of_NN_val_loss(acc_his.losses_val_losses, 4))
            # NOTE(review): the expression below subtracts acc_his.losses[...]
            # from itself, so the appended value is always 0. The analogous
            # code elsewhere in this project computes losses - losses_val;
            # the second operand is likely meant to be
            # acc_his.losses_val[conv_epoch[-1] - 1].
            diff_of_over_fitting_at_conv.append(
                acc_his.losses[conv_epoch[-1] - 1] -
                acc_his.losses[conv_epoch[-1] - 1])
            max_acc.append(max(acc_his.losses))
Code example #6
0
File: tes_nn.py  Project: FrederikBrezina/SProject
            # --- Fragment of a multi-output training routine; the enclosing
            # loops/definition and the names layers, inputs, depth, layers_val,
            # c, tries, num_of_ouputs, outputs, std_tries, X, Y_list, x_test,
            # Y_test and conv_epoch are defined outside this chunk. ---
            layers2 = layers(inputs)
            # Stack the remaining depth-1 hidden layers; width and activation
            # index for each come from layers_val.
            for layer in range(1, depth):
                layers = Dense(int(layers_val[layer][0]),
                               activation=c[int(layers_val[layer][1])])
                layers2 = layers(layers2)

            # One named sigmoid output head per try; all heads are collected
            # so a single Model trains them jointly.
            name = 'output{0:d}'.format(tries)
            output2 = Dense(num_of_ouputs, activation='sigmoid', name=name)
            output = output2(layers2)
            outputs.append(output)

        model = Model(inputs=inputs, outputs=outputs)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        # Multi-model callbacks record one history per output head.
        acc_his = MyCallbacks.MultiModelAcc(std_tries)
        loss_his = MyCallbacks.MultiModelLosses(std_tries)

        model.fit(X,
                  Y_list,
                  epochs=200,
                  batch_size=20,
                  validation_data=(x_test, Y_test),
                  callbacks=[acc_his, loss_his],
                  shuffle=True)
        # Data Calculation
        # Per-head convergence epoch (window of 4) from the validation loss.
        for num_of_output in range(0, std_tries):
            conv_epoch.append(
                hp.convergence_of_NN_val_loss(
                    loss_his.val_losses[num_of_output], 4))
            # NOTE(review): chunk ends mid-statement below; the rest of this
            # append() call is outside the visible source.
            diff_of_over_fitting_at_conv.append(