Example #1
def search():
    '''search over our hyperparameter grid for something maybe useful'''
    # generate ranges for our hyper-params
    r_dim = list(range(40, 121, 15))
    r_width = list(range(40, 481, 80))
    r_depth = list(range(1, 10, 2))

    # generate permutations
    cross = [(i, j, k) for i in r_dim for j in r_width for k in r_depth]

    # run hyper for all permutations and store every result in SQLite
    # (results go straight to the db; no intermediate DataFrame is needed)
    import sqlite3
    for (di, wi, de) in cross:
        # wrapped in try so a single failing configuration can't kill the search
        try:
            # hyper's signature is (dim, depth, width), so depth goes before width
            loss, acc = hyper(di, de, wi)
            # open db
            conn = sqlite3.connect('hyper.db')
            c = conn.cursor()
            # insert results (a parameterized query avoids quoting/injection issues)
            c.execute(
                'INSERT INTO performances VALUES (?, ?, ?, ?, ?)',
                (di, wi, de, loss, acc))
            # close connection
            conn.commit()
            conn.close()
        except Exception:
            print('error for params: {}, {}, {}'.format(di, wi, de))
            quicksend('error for params: {}, {}, {}'.format(di, wi, de))
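The INSERT above assumes a `performances` table already exists in `hyper.db`. A minimal one-time setup sketch; the column names and types are assumptions inferred from the INSERT order:

import sqlite3

# create the table the search loop writes into (schema is an assumption)
conn = sqlite3.connect('hyper.db')
conn.execute(
    'CREATE TABLE IF NOT EXISTS performances '
    '(dim INTEGER, width INTEGER, depth INTEGER, loss REAL, acc REAL)')
conn.commit()
conn.close()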
Example #2
# assumes models, X, y, test and index are defined earlier in the script,
# along with numpy as np and pandas as pd
for model in models:
    model.compile(optimizer='Adadelta',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

for i in range(1, 11):
    # reset the accumulators for this round of training
    predictions[i] = None
    results[i] = {}

    # train each model for another 100 epochs (fit resumes from the current
    # weights, so epochs accumulate across iterations of i),
    # then make predictions
    for model in models:
        model.fit(X, y, epochs=100, batch_size=120, verbose=1)
        results[i][model] = model.predict(test)
        # accumulate the predicted probabilities across models
        # (comparing a NumPy array to [] with == does not work reliably)
        if predictions[i] is None:
            predictions[i] = results[i][model]
        else:
            predictions[i] += results[i][model]

    # now get model_avg prediction, print them to csv
    y_pred = np.argmax(predictions[i], axis=1)

    resf = pd.DataFrame({'Id': index, 'y': y_pred})
    resf.to_csv('res_model_avg_{}_iter.csv'.format(i * 100), index=False)
    print('maniggli done with {} iterations'.format(i * 100))
    try:
        quicksend('maniggli done with {} iterations'.format(i * 100))
    except Exception:
        pass
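Since `np.argmax` runs over the summed probability matrix, dividing by the number of models first would not change the predicted classes, so the running sum above is equivalent to a proper mean ensemble. A tiny sketch of that equivalence:

import numpy as np

# summed probabilities from two hypothetical models
summed = np.array([[0.2, 0.8], [0.9, 0.1]]) + np.array([[0.4, 0.6], [0.7, 0.3]])
# argmax of the sum equals argmax of the mean
assert (np.argmax(summed, axis=1) == np.argmax(summed / 2, axis=1)).all()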
Example #3
# assumed imports for this snippet; LOGGER, quicksend and the Local flag
# are defined elsewhere in the script
from datetime import datetime as dt

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


def hyper(dim: int, depth: int, width: int):
    LOGGER.info('starting main for {} dimensions'.format(dim))

    # read data
    train = pd.read_hdf("train.h5", "train")
    test = pd.read_hdf("test.h5", "test")
    index = test.index

    ##################################################
    # # # preprocess data (scaling and encoding) # # #
    ##################################################
    X = train.drop(['y'], axis=1).values
    y = train.pop('y').values
    test = test.values

    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    test = scaler.transform(test)

    enc_dim = dim  # enc_dim: dimension to encode to

    # create input layer
    i_layer = Input(shape=(120, ))

    # # create intermediate encoding layer
    # interm_dim = 120
    # interm_enc = Dense(interm_dim, activation = 'sigmoid')(i_layer)

    # create encoded layer
    e_layer = Dense(enc_dim, activation='relu')(i_layer)
    # e_layer = Dense(enc_dim, activation = 'sigmoid')(interm_enc)

    # # create intermediate decoding layer
    # interm_dec = Dense(interm_dim, activation = 'sigmoid')(e_layer)

    # create decoded layer
    d_layer = Dense(120)(e_layer)
    # d_layer = Dense(120)(interm_dec)

    # create auto-encoder, the model that maps input straight to output
    auto_encoder = Model(i_layer, d_layer)

    # encoder: map input to lower dimension
    encoder = Model(i_layer, e_layer)

    # # create model for decoding
    # enc_input = Input(shape = (enc_dim,))
    # dec_layer = auto_encoder.layers[-1]
    # decoder = Model(enc_input, dec_layer(enc_input))

    # now let's train!
    # NOTE: we encode our entire X. binary_crossentropy assumes targets in
    # [0, 1]; with standardized inputs and a linear decoder, 'mse' would be
    # the more conventional reconstruction loss
    auto_encoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    auto_encoder.fit(X, X, epochs=25)

    # and now we can encode our data:
    X = encoder.predict(X)
    test = encoder.predict(test)

    # update user
    print('encoding done!')

    # if the Local flag is set, hold out a local test split; otherwise train
    # on everything and predict the real test set further down
    if Local:
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.33, random_state=11312)
    else:
        # no held-out labels in the real run; X_test/y_test are not used below
        X_train, y_train = X, y

    ################################################
    # # # create model from given hyper-params # # #
    ################################################
    # stack of hidden layers: `width` units each, `depth` layers deep;
    # the first layer takes the enc_dim-dimensional encoded input
    layerz = [
        keras.layers.Dense(width, activation=tf.nn.relu, input_dim=enc_dim)
    ]
    for i in range(1, depth):
        layerz.append(keras.layers.Dense(width, activation=tf.nn.relu))
    # append the last layer, which outputs the 5 class probabilities
    layerz.append(keras.layers.Dense(5, activation=tf.nn.softmax))

    # Model 2 from earlier (model.name is read-only in newer Keras, so the
    # name is passed to the constructor instead)
    model = keras.Sequential(
        layerz, name='hyper_{}_{}_{}'.format(dim, depth, width))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(X_train, y_train, epochs=100)

    LOGGER.info('Done, {} now'.format('evaluating' if Local else 'predicting'))

    if Local:
        # Local is True: evaluate on the held-out local data
        results = model.evaluate(X_test, y_test)
        print("done :D")
        quicksend("done :D, for: {}, {}, {}:".format(dim, depth, width))
        print(results)
        quicksend(results)

        LOGGER.info('evaluation done, results:')
        LOGGER.info(results)
        now = dt.now()
        LOGGER.info('timestamp: {}-{} {}:{}\n\n'.format(
            now.month, now.day, now.hour, now.minute))
        return results
    else:
        # otherwise predict the test set and write it to csv
        # (argmax over the softmax output replaces the old predict_classes API)
        y_pred = np.argmax(model.predict(test), axis=1)
        resf = pd.DataFrame({'Id': index, 'y': y_pred})

        # build a timestamped filename
        now = dt.now()
        filename = 'Results_task3_{}_{}_{}_{}.csv'.format(
            now.month, now.day, now.hour, now.minute)
        resf.to_csv(filename, index=False)
        print('Done')
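A minimal usage sketch for a local run, assuming the HDF5 files are in the working directory and the module-level `Local` flag is set; `model.evaluate` with `metrics=['accuracy']` returns a `[loss, accuracy]` pair, which is what the unpacking in the search loop of Example #1 relies on:

Local = True
loss, acc = hyper(dim=80, depth=3, width=200)
print('loss: {:.4f}, acc: {:.4f}'.format(loss, acc))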
Example #4
    # generate permutations
    cross = [(i, j, k) for i in r_dim for j in r_width for k in r_depth]

    # run hyper for all permutations and store every result in SQLite
    # (results go straight to the db; no intermediate DataFrame is needed)
    import sqlite3
    for (di, wi, de) in cross:
        # wrapped in try so a single failing configuration can't kill the search
        try:
            # hyper's signature is (dim, depth, width), so depth goes before width
            loss, acc = hyper(di, de, wi)
            # open db
            conn = sqlite3.connect('hyper.db')
            c = conn.cursor()
            # insert results (a parameterized query avoids quoting/injection issues)
            c.execute(
                'INSERT INTO performances VALUES (?, ?, ?, ?, ?)',
                (di, wi, de, loss, acc))
            # close connection
            conn.commit()
            conn.close()
        except Exception:
            print('error for params: {}, {}, {}'.format(di, wi, de))
            quicksend('error for params: {}, {}, {}'.format(di, wi, de))


if __name__ == '__main__':
    quicksend('starting now')
    search()
    LOGGER.info('all done')
    quicksend('all done')
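After the search finishes, the stored results can be ranked directly in SQLite. A sketch for pulling the best-performing configuration, assuming the `performances` schema sketched after Example #1:

import sqlite3

conn = sqlite3.connect('hyper.db')
row = conn.execute(
    'SELECT dim, width, depth, loss, acc FROM performances '
    'ORDER BY acc DESC LIMIT 1').fetchone()
conn.close()
print('best (dim, width, depth):', row[:3])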
Example #5
for model in models:
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

for i in range(1, 11):
    # reset the accumulators for this round of training
    predictions[i] = None
    results[i] = {}

    # train each model for another 100 epochs (epochs accumulate across
    # iterations of i), then make predictions
    for model in models:
        model.fit(X, y, epochs=100, batch_size=120, verbose=1)
        results[i][model] = model.predict(test)
        # accumulate the predicted probabilities across models
        if predictions[i] is None:
            predictions[i] = results[i][model]
        else:
            predictions[i] += results[i][model]

    # now get model_avg prediction, print them to csv
    y_pred = np.argmax(predictions[i], axis=1)

    resf = pd.DataFrame({'Id': index, 'y': y_pred})
    resf.to_csv('res_model_avg_{}_iter.csv'.format(i * 100), index=False)
    print('Done with {} iterations'.format(i * 100))
    try:
        quicksend('Done with {} iterations'.format(i * 100))
    except Exception:
        pass
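`quicksend` is a notification helper used throughout these examples but never shown. A hypothetical stand-in that keeps the snippets runnable; the real helper presumably pushes the message to email or chat:

def quicksend(message):
    '''Hypothetical stand-in for the unshown notification helper.'''
    # the real implementation presumably sends `message` somewhere;
    # printing is enough to keep the examples self-contained
    print('[quicksend] {}'.format(message))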
Example #6
# assumes a Sequential model with its input layers added above this excerpt,
# plus X_test, y_test, test, index and batch_size defined earlier
model.add(Dropout(0.4))
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(5, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])

# model.fit(X, y, batch_size=batch_size, epochs=2000, verbose=1)
# NOTE: this fits on the same split it evaluates on below, so the reported
# performance is not a held-out estimate
model.fit(X_test, y_test, batch_size=batch_size, epochs=1000, verbose=1)

perf = model.evaluate(X_test, y_test)
print('done')
print(perf)

try:
    quicksend('done')
    quicksend(perf)
except Exception:
    pass

# exit() short-circuits the script here; the prediction code below only
# runs with this line removed
exit()

dataYPredict = model.predict(test)
y_pred = np.argmax(dataYPredict, axis=1)

resf = pd.DataFrame({'Id': index, 'y': y_pred})
resf.to_csv('res_2-1.csv', index=False)
print('Done')
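Unlike the other examples, this model is compiled with `categorical_crossentropy`, which expects one-hot targets rather than the integer labels that `sparse_categorical_crossentropy` takes. A sketch of the conversion, assuming integer labels `y_int` (a hypothetical name) and the 5 classes from the output layer:

from tensorflow.keras.utils import to_categorical

# integer class labels 0..4 -> one-hot rows of length 5
y = to_categorical(y_int, num_classes=5)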