Example #1
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras_diagram import ascii

def multiclass_model(embedding_size, window_size):
    print('Building model...')
    model = Sequential()
    # 128 character-space (ASCII only)
    model.add(Embedding(128, embedding_size, input_length=window_size))
    model.add(LSTM(2000, dropout=0.2, recurrent_dropout=0.2))
    # softmax to match the categorical_crossentropy loss below
    model.add(Dense(window_size, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    print(ascii(model))
    return model
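A quick smoke test, as a sketch: the sizes below are illustrative assumptions (borrowed from binary_model's defaults in Example #3), and inputs must be integer ASCII codes below 128 to fit the Embedding layer.

import numpy as np

window_size = 56  # assumed value, mirroring binary_model's defaults
model = multiclass_model(embedding_size=105, window_size=window_size)

x = np.random.randint(0, 128, size=(2, window_size))  # two windows of ASCII codes
probs = model.predict(x)
print(probs.shape)  # (2, window_size)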
Example #2
    def set_model(self, model):
        # relies on os and json being imported at module level

        with open('tmp_report.txt', 'w') as fh:
            model.summary(print_fn=lambda x: fh.write(x + '\n'))
        with open('tmp_report.txt', 'r') as f:
            summary = f.read()
        os.remove('tmp_report.txt')

        self.send({"architecture":json.loads(model.to_json()),
                   "training": {
                       "optimizer":{**model.optimizer.get_config(), **{"name":model.optimizer.__class__.__name__.lower()}}
                   },
                   "ascii": ascii(model),
                   "summary": summary},
                  "model/")
Example #3
def binary_model(embedding_size=105,
                 window_size=56,
                 window_step=4,
                 lstm_size=5480):
    print('Building model...')
    model = Sequential()
    # 128 character-space (ascii only)
    # best was lstm 2000, embedding 200
    model.add(Embedding(128, embedding_size, input_length=window_size))
    model.add(LSTM(lstm_size, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',  # Adam(lr=0.001)
        metrics=['binary_accuracy'])
    print('-' * 20, 'Binary Model', '-' * 20)
    print(ascii(model))
    return model
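gen_training_data itself is not shown in this listing; a hypothetical sketch of the window extraction that the window_size/window_step parameters imply:

import numpy as np

def char_windows(text, window_size=56, window_step=4):
    # hypothetical helper: slide a fixed-size window over the text and
    # emit integer ASCII codes (< 128) for the Embedding layer
    codes = [min(ord(c), 127) for c in text]
    return np.array([codes[i:i + window_size]
                     for i in range(0, len(codes) - window_size + 1, window_step)],
                    dtype='int32')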
Example #4
    # train (the original snippet starts mid-call; the fit_generator
    # opening here is an assumption matching the evaluation code below)
    model.fit_generator(
        batch_generator,
        epochs=1,  # epochs,
        steps_per_epoch=N // batch_size,
        # callbacks=[tbCallback, checkpointCallback]
    )

    # validate
    larger_class, remove_items, N = precompute(filename=sys.argv[2],
                                               multiclass=multiclass,
                                               balance=not multiclass,
                                               window_size=window_size,
                                               window_step=window_step)
    batch_generator = gen_training_data(filename=sys.argv[2],
                                        multiclass=multiclass,
                                        balance=False,
                                        N=N,
                                        window_size=window_size,
                                        window_step=window_step,
                                        batch_size=batch_size)
    score, acc = model.evaluate_generator(batch_generator,
                                          steps=N // batch_size)

    name = modelname(embedding_size, lstm_size, acc, multiclass)
    print('Saving Keras model', name)
    model.save(os.path.abspath('.') + '/models/' + name)

    print('\n', '+' * 20, 'Results', '+' * 20)
    print(ascii(model))
    print('Test score:', score)
    print('Test accuracy:', acc)
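modelname is referenced above but not shown; a hypothetical implementation consistent with its call site:

def modelname(embedding_size, lstm_size, acc, multiclass):
    # hypothetical: encode the hyperparameters and test accuracy in the filename
    kind = 'multiclass' if multiclass else 'binary'
    return '{}_emb{}_lstm{}_acc{:.4f}.h5'.format(kind, embedding_size,
                                                 lstm_size, acc)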
Example #5
from keras_diagram import ascii

def CNN(Bands, Size, Filepath):
    
    model = Sequential()
        
    model.add(Convolution2D(30, (15, 15), padding='valid',
                            input_shape=(int(Bands), int(Size[0]), int(Size[1])),
                            activation='relu', kernel_initializer='he_normal',
                            bias_initializer='zeros', name='convolution_1'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(30, (15, 15), activation='relu', kernel_initializer='he_normal'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(15, (5, 5), activation='relu', kernel_initializer='he_normal'))
    model.add(Convolution2D(15, (5, 5), activation='relu', kernel_initializer='he_normal'))
    #model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1, activation='sigmoid', kernel_initializer='he_normal'))
    
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        
    model.summary()
    Map = ascii(model)
    print('')
    print(Map)
    with open('Output/' + Filepath + '/Model_Layout.txt', 'w') as f:
        f.write(Map)
    
    return model
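Note that input_shape above is channels-first (bands, height, width); Keras defaults to channels_last, so (assuming a standard Keras 2 setup) a setting like the following would be needed for the model to build as written:

from keras import backend as K

# this model expects (bands, height, width), i.e. channels-first input
K.set_image_data_format('channels_first')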
    
Example #6
def main():
    """
    """
    train = pd.read_csv('data/train.csv')
    test = pd.read_csv('data/test.csv')

    X_train, X_val, y_train, y_val = train_test_split(train.drop(['label'],
                                                                 axis=1),
                                                      train['label'],
                                                      test_size=VAL_SIZE,
                                                      stratify=train['label'],
                                                      random_state=SEED)
    print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)

    train_dataset = X_train.values
    train_labels = y_train

    valid_dataset = X_val.values
    valid_labels = y_val

    train_dataset = train_dataset.reshape(X_train.shape[0], 28, 28, 1)
    valid_dataset = valid_dataset.reshape(X_val.shape[0], 28, 28, 1)

    train_dataset = train_dataset.astype('float32')
    train_dataset /= 255
    valid_dataset = valid_dataset.astype('float32')
    valid_dataset /= 255

    train_labels = np_utils.to_categorical(train_labels, 10)
    valid_labels = np_utils.to_categorical(valid_labels, 10)

    run_model = create_model()
    run_model.summary()

    start_time = datetime.datetime.now()

    run_model.fit(train_dataset,
                  train_labels,
                  batch_size=BATCH_SIZE,
                  epochs=NUM_EPOCHS,
                  verbose=1)

    names = run_model.metrics_names
    score = run_model.evaluate(valid_dataset, valid_labels, verbose=0)
    print(names, score)

    with open(logfile, "a") as myfile:
        now = datetime.datetime.now()
        results = zip(names, score)
        summary = ascii(run_model)
        print("\n{0} START\n{1} END".format(start_time, now), file=myfile)
        print("learning rate: {} batch size: {} epochs: {} optimizer: adam".
              format(LEARNING_RATE, BATCH_SIZE, NUM_EPOCHS),
              file=myfile)
        for name, value in results:
            print("{0}: {1:.5f}".format(name, value), file=myfile)
        print(summary, file=myfile)

    test_dataset = test.values
    test_dataset = test_dataset.reshape(test.shape[0], 28, 28, 1)
    test_dataset = test_dataset.astype('float32')
    test_dataset /= 255

    predicted_labels = run_model.predict_classes(test_dataset)
    generate_submission(predicted_labels)
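generate_submission is not shown in this listing; a hypothetical version writing the usual ImageId/Label CSV:

import pandas as pd

def generate_submission(predicted_labels, path='submission.csv'):
    # hypothetical: 1-based ImageId alongside each predicted digit
    pd.DataFrame({'ImageId': range(1, len(predicted_labels) + 1),
                  'Label': predicted_labels}).to_csv(path, index=False)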
Example #7
input_L1 = Input(shape=(3, 32, 32))

conv_L1 = Convolution2D(nb_filters, nb_conv, nb_conv,
                        init='glorot_uniform', activation='relu',
                        border_mode='same', dim_ordering='th')
conved_L1 = conv_L1(input_L1)

mxpool_L1 = MaxPooling2D((2, 2))(conved_L1)

conv_L2 = Convolution2D(nb_filters, nb_conv, nb_conv,
                        activation='relu', border_mode='same')

# apply conv_L2 here; reusing conv_L1 would fail, since its weights
# were built for the 3-channel input, not the pooled feature maps
conved_L2 = conv_L2(mxpool_L1)
mxpool_L2 = MaxPooling2D((2, 2))(conved_L2)

out_L3 = Flatten()(mxpool_L2)
output_L1 = Dense(128, activation='relu')(out_L3)  # hidden layer: relu, not softmax

output_L2 = Dense(11, activation='softmax')(output_L1)


model = Model(input_L1, output_L2)

model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=batch_size,
          validation_data=(X_test, y_test), nb_epoch=nb_epoch, verbose=1)
print(ascii(model))
Example #8
    def print_model(self):
        print(ascii(self.model))
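For context, a minimal (hypothetical) wrapper class this method could belong to:

from keras_diagram import ascii

class ModelReporter:
    # hypothetical container; the listing shows only the method itself
    def __init__(self, model):
        self.model = model

    def print_model(self):
        print(ascii(self.model))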