def run_mlp(**args):

  print("building mlp model:")
  print(args['training_data'].shape[0])
  print(args['training_data'].shape[1])
  model = Sequential()
  model.add(Dense(output_dim=512, input_dim=args['training_data'].shape[1], activation='relu'))
  model.add(Dense(1))
  model.add(Activation('linear'))
  model.compile(loss='mse', optimizer='rmsprop')

  model.fit(args['training_data'], args['training_label'], nb_epoch=20, batch_size=512)

  json_string = model.to_json()
  open('mlp_model_architecture.json', 'w').write(json_string)
  model.save_weights(args['output_weight_filename'], overwrite=True)

  output = model.predict(args['test_data'], verbose=1, batch_size=512)

  if args['output_type'] == 'int':
    output_int = list(map(lambda e:int(e), np.round(output)))
    pickle.dump(output_int, open(args['output_feat_filename'], 'wb'), protocol=4)
    return output_int
  else:
    pickle.dump(output, open(args['output_feat_filename'], 'wb'), protocol=4)
    return output
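
# A minimal reload sketch for the artifacts written above (an illustrative
# assumption, not part of the original example): model_from_json rebuilds the
# topology saved in 'mlp_model_architecture.json', then the weights are restored.
from keras.models import model_from_json

def load_mlp(weight_filename):
  with open('mlp_model_architecture.json') as f:
    model = model_from_json(f.read())  # rebuild the architecture
  model.load_weights(weight_filename)  # restore the trained parameters
  model.compile(loss='mse', optimizer='rmsprop')  # recompile before evaluate()
  return model
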
    def pre_train(self, X=None, monitor='acc', patience=3):
        """

        :param X: Series. Conjunto de datos de texto utilizados para el pre entrenamiento
        :param monitor: String. Variable utilizada para la monitorización de la red
        :param patience: Integer. Cantidad de epochs que han de pasar sin obtener mejora para parar de entrenar la red.
        :return:
         Conjunto de pesos resultado del entrenamiento de la red.
         Estos pesos serán utilizados como inizialización de los pesos de la red de clasificación real
        """

        # Preparamos los datos
        X = self.data_transform.df_towem(X=X, persist=True, direction='data/pre-train-deep.pickle')

        modelo = Sequential()
        modelo.add(Convolution1D(nb_filter=128, filter_length=4, border_mode='same', init='uniform', bias=True,
                                 input_shape=(X.shape[1], None), name='conv_layer'))
        modelo.add(Activation('relu'))
        modelo.add(Dropout(0.2))
        modelo.add(MaxPooling1D(pool_length=4, stride=2))
        modelo.add(LSTM(200, name='lstm_layer'))
        modelo.add(Activation('tanh'))
        modelo.add(Dropout(0.1))
        modelo.add(Dense(2, activation='softmax'))
        modelo.compile(loss="categorical_crossentropy", optimizer='adamax', metrics=['accuracy'])

        cbks = [callbacks.EarlyStopping(monitor=monitor, patience=patience)]

        modelo.fit(X, X, callbacks=cbks, validation_split=0.25, shuffle=True,
                   nb_epoch=self.epoch_number, batch_size=self.batch_size, verbose=1)

        modelo.save_weights('model_weights.h5')
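
# A minimal sketch (an assumption, not taken from the original class) of how the
# pre-trained weights saved above could seed the real classifier:
# load_weights(..., by_name=True) copies weights only into layers whose names
# match, here the 'conv_layer' defined in pre_train.
from keras.models import Sequential
from keras.layers import Convolution1D

def seed_classifier(input_len, vec_dim):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=128, filter_length=4, border_mode='same',
                          input_shape=(input_len, vec_dim), name='conv_layer'))
    # ... classification head (pooling, dense layers) would follow here ...
    clf.load_weights('model_weights.h5', by_name=True)  # only 'conv_layer' matches
    return clf
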
Example #3
def main():
	train_X = np.load('train_X.npy')
	train_y = np.load('train_y.npy')
	test_X = np.load('test_X.npy')
	test_y = np.load('test_y.npy')

	model = Sequential()
	model.add(Flatten(input_shape=(15,60,2)))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dense(900))
	model.add(Activation('sigmoid'))

	model.summary()

	adam = Adam(0.001)
	#adagrad = Adagrad(lr=0.01)
	model.compile(loss='mse', optimizer=adam)

	model.fit(train_X, train_y, batch_size=batch_size, nb_epoch=nb_epoch,
	          verbose=1, validation_data=(test_X, test_y))
	model.save_weights('model.h5', overwrite=True)
Example #4
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
def train_48calibration_net(X_train, y_train):
    print(X_train.shape, y_train.shape)
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    X_train = X_train.astype('float32')
    X_train /= 255
    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(3, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool),strides=(2,2)))
    #model.add(BatchNormalization(mode=2))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    #model.add(BatchNormalization(mode=2))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_split=0.2)
    json_string = model.to_json()
    open('../model/48calibration_architecture.json', 'w').write(json_string)
    model.save_weights('../model/48calibration_weights.h5')
def train_top_model():
    start = 0.03
    stop = 0.001
    nb_epoch = 300

    train_data = np.load(open('bottleneck_features_train.npy'))
    train_labels = np.load(open('label_train.npy'))

    validation_data = np.load(open('bottleneck_features_validation.npy'))
    validation_labels = np.load(open('label_validation.npy'))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(30))

    sgd = SGD(lr=start, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    learning_rates = np.linspace(start, stop, nb_epoch)
    change_lr = LearningRateScheduler(lambda epoch: float(learning_rates[epoch]))
    hist = model.fit(train_data, train_labels,
                     nb_epoch=nb_epoch,
                     validation_data=(validation_data, validation_labels),
                     callbacks=[change_lr])

    model.save_weights('model_top_vgg.h5')
    np.savetxt('model_top_vgg_flip_loss.csv', hist.history['loss'])
    np.savetxt('model_top_vgg_flip_val_loss.csv', hist.history['val_loss'])
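
# The LearningRateScheduler above is called once per epoch with the epoch index
# and must return that epoch's learning rate as a float. A quick sanity check of
# the linear schedule (start=0.03, stop=0.001, nb_epoch=300 as defined above):
import numpy as np

schedule = np.linspace(0.03, 0.001, 300)
print(schedule[0], schedule[-1])  # 0.03 0.001 -> decays linearly over 300 epochs
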
def create_model(X_train, Y_train):
    """create_model will create a very simple neural net model and  save the weights in a predefined directory.

    Args:
        X_train:    Input X_train
        Y_train:    Lables Y_train
    """
    xin = X_train.shape[1]

    model = Sequential()
    model.add(Dense(units=4, input_shape=(xin, )))
    model.add(Activation('tanh'))
    model.add(Dense(4))
    model.add(Activation('linear'))
    model.add(Dense(1))

    rms = kop.RMSprop()

    print('compiling now..')
    model.compile(loss='mse', optimizer=rms)

    model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)
    score = model.evaluate(X_train, Y_train, batch_size=1)
    print("Evaluation results:", score)
    open('pickles/my_model_architecture.json', 'w').write(model.to_json())

    print("Saving weights in: ./pickles/my_model_weights.h5")
    model.save_weights('pickles/my_model_weights.h5')
Example #8
def run_mlp(**args):

    print("building mlp model:")
    print(args["training_data"].shape[0])
    print(args["training_data"].shape[1])
    model = Sequential()
    model.add(Dense(output_dim=512, input_dim=args["training_data"].shape[1], activation="relu"))
    # model.add(Dense(output_dim=64, input_dim=128, activation='relu'))
    # model.add(Dense(output_dim=32, input_dim=64, activation='relu'))
    model.add(Dense(1))
    model.add(Activation("linear"))
    model.compile(loss="mse", optimizer="rmsprop")

    model.fit(args["training_data"], args["training_label"], nb_epoch=50, batch_size=512)

    # pickle.dump(model, open('mlp_testmodel.p', 'w'), protocol=4)
    json_string = model.to_json()
    open("mlp_model_architecture.json", "w").write(json_string)
    model.save_weights("mlp_model_weights.h5", overwrite=True)

    # output = model.evaluate(args['test_data'], args['test_label'], batch_size=512)
    output = model.predict(args["test_data"], verbose=1, batch_size=512)
    output_int = list(map(lambda e: int(e), np.round(output)))
    pickle.dump(output_int, open("mlp_output.p", "wb"), protocol=4)

    return output_int
    def train_model(self):
        print('=======begin preparing data at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '=========')
        list_sorted = self.word2vec()

        self.y = np.array(list(self.y))
        self.x = list(sequence.pad_sequences(list(self.x), maxlen=max_len))
        self.x = np.array(list(self.x))
        print('=======finished preparing data at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '=========')

        print('=======begin training model at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '=========')
        model = Sequential()
        model.add(Embedding(input_dim=len(list_sorted) + 1, output_dim=256, input_length=max_len))
        model.add(LSTM(128))
        model.add(Dropout(0.5))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam')
        model.fit(self.x, self.y, batch_size=16, nb_epoch=10)

        json_string = model.to_json()
        open('sa_model_architecture.json', 'w').write(json_string)
        model.save_weights('sa_model_weights.h5', overwrite=True)
        print('=======finished training model at ' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '=========')

        return model
def train_lstm(n_symbols,embedding_weights,x_train,y_train,x_test,y_test):
    print('Defining a Simple Keras Model...')
    model = Sequential()  # or Graph or whatever
    model.add(Embedding(output_dim=vocab_dim,
                        input_dim=n_symbols,
                        mask_zero=True,
                        weights=[embedding_weights],
                        input_length=input_length))  # Adding Input Length
    model.add(LSTM(output_dim=50, activation='sigmoid', inner_activation='hard_sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    print('Compiling the Model...')
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',metrics=['accuracy'])

    print "Train..."
    model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=n_epoch,verbose=1, validation_data=(x_test, y_test),show_accuracy=True)

    print "Evaluate..."
    score = model.evaluate(x_test, y_test,
                                batch_size=batch_size)

    yaml_string = model.to_yaml()
    with open('lstm_data/lstm.yml', 'w') as outfile:
        outfile.write(yaml_string)  # to_yaml() already returns a YAML string; no extra yaml.dump needed
    model.save_weights('lstm_data/lstm.h5')
    print('Test score:', score)
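
# A minimal reload sketch for the YAML artifact above (assuming lstm.yml holds
# the raw to_yaml() string): model_from_yaml rebuilds the architecture and
# load_weights restores the trained parameters.
from keras.models import model_from_yaml

with open('lstm_data/lstm.yml') as f:
    model = model_from_yaml(f.read())
model.load_weights('lstm_data/lstm.h5')
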
Example #11
def train():
    X, Y = load_data()

    # create model
    model = Sequential()
    # input_dim is the number of input features (8)
    #model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
    model.add(Dense(256, input_dim=8, init='uniform', activation='relu'))
    model.add(Dense(16, init='uniform', activation='relu'))
    model.add(Dense(1, init='uniform', activation='sigmoid'))

    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Fit the model
    #model.fit(X, Y, nb_epoch=150, batch_size=10)
    model.fit(X, Y, nb_epoch=1000, batch_size=32, shuffle=True)

    # serialize model to JSON
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model.h5")
    print("Saved model to disk")
Example #12
def main(args=sys.argv[1:]):
    model = Sequential()
    model.add(Embedding(vocsize + 1, sizes[0], mask_zero=True,
                        input_length=seq_length))
    for size in sizes[:-1]:
        model.add(LSTM(128, return_sequences=True))
    model.add(LSTM(sizes[-1]))
    model.add(Dense(vocsize))
    model.add(Activation('softmax'))

    print('x.shape:', x.shape)
    print('y.shape:', y.shape)

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop')

    with open('topology.json', 'w') as f:
        f.write(model.to_json())

    for iteration in range(1, num_batches//batches_per_it):
        print()
        print('-' * 50)
        print('Iteration', iteration)

        model.fit(x, y, batch_size=batch_size,
                  nb_epoch=batches_per_it, verbose=True)
        model.save_weights('brain-{}.h5'.format(iteration))
def train_top_model():
    train_data = np.load(open('bottleneck_features_train.npy'))
    train_labels = np.load(open('target_train.npy'))

    validation_data = np.load(open('bottleneck_features_validation.npy'))
    validation_labels = np.load(open('target_valid.npy'))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(128, W_regularizer=l2(0.005)))
    model.add(LeakyReLU(alpha=0.001))
    model.add(Dropout(0.5))
    model.add(Dense(128, W_regularizer=l2(0.005)))
    model.add(LeakyReLU(alpha=0.001))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy')

    model.fit(train_data, train_labels,
              nb_epoch=nb_epoch_top_model, batch_size=batch_size,
              validation_data=(validation_data, validation_labels))
    model.save_weights(top_model_weights_path)
    return
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #15
def train_mlp():
    with open('../data/params_0.pkl', 'rb') as f:
        w_0, _, _ = cPickle.load(f)

    with open('../data/params_1.pkl', 'rb') as f:
        w_1, _, _ = cPickle.load(f)

    with open('../data/params_2.pkl', 'rb') as f:
        w_2, _, _ = cPickle.load(f)

    train_x, train_y = SupervisedLoader.load('../data')

    model = Sequential()
    model.add(Dense(33, 64, weights=[w_0]))
    model.add(Activation('sigmoid'))
    # model.add(Dropout(0.2))
    model.add(Dense(64, 128, weights=[w_1]))
    model.add(Activation('sigmoid'))
    # model.add(Dropout(0.2))
    model.add(Dense(128, 128, weights=[w_2]))
    model.add(Dense(128, 1, init='glorot_uniform'))
    model.add(Activation('relu'))

    # sgd = SGD(lr=1.e-5, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error', optimizer='adagrad')

    model.fit(train_x, train_y, nb_epoch=500, batch_size=128, validation_split=0.2)

    model.save_weights('../data/mlp_params.hdf5')
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #17
def trainModel():
    inputs, correctOutputs = getNNData()

    print("Collected data")

    trainingInputs = inputs[:len(inputs)//2]
    trainingOutputs = correctOutputs[:len(correctOutputs)//2]

    testInputs = inputs[len(inputs)//2:]
    testOutputs = correctOutputs[len(correctOutputs)//2:]

    model = Sequential()
    model.add(Dense(24, input_shape=(24, )))
    model.add(Activation('tanh'))
    model.add(Dense(24))
    model.add(Activation('tanh'))
    model.add(Dense(5))
    model.add(Activation('softmax'))

    model.summary()

    model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True))

    model.fit(trainingInputs, trainingOutputs, validation_data=(testInputs, testOutputs))
    score = model.evaluate(testInputs, testOutputs, verbose=0)
    print(score)

    json_string = model.to_json()
    open('my_model_architecture.json', 'w').write(json_string)
    model.save_weights('my_model_weights.h5', overwrite=True)
Example #18
def get_nn_model(token_dict_size):
    _logger.info('Initializing NN model with the following params:')
    _logger.info('Input dimension: %s (token vector size)' % TOKEN_REPRESENTATION_SIZE)
    _logger.info('Hidden dimension: %s' % HIDDEN_LAYER_DIMENSION)
    _logger.info('Output dimension: %s (token dict size)' % token_dict_size)
    _logger.info('Input seq length: %s ' % INPUT_SEQUENCE_LENGTH)
    _logger.info('Output seq length: %s ' % ANSWER_MAX_TOKEN_LENGTH)
    _logger.info('Batch size: %s' % SAMPLES_BATCH_SIZE)

    model = Sequential()
    seq2seq = SimpleSeq2seq(
        input_dim=TOKEN_REPRESENTATION_SIZE,
        input_length=INPUT_SEQUENCE_LENGTH,
        hidden_dim=HIDDEN_LAYER_DIMENSION,
        output_dim=token_dict_size,
        output_length=ANSWER_MAX_TOKEN_LENGTH,
        depth=1
    )

    model.add(seq2seq)
    model.compile(loss='mse', optimizer='rmsprop')

    # use previously saved weights if they exist; otherwise persist the fresh ones
    # (saving unconditionally here would clobber any earlier training run)
    _logger.info('Looking for a model %s' % NN_MODEL_PATH)

    if os.path.isfile(NN_MODEL_PATH):
        _logger.info('Loading previously calculated weights...')
        model.load_weights(NN_MODEL_PATH)
    else:
        model.save_weights(NN_MODEL_PATH)

    _logger.info('Model is built')
    return model
Example #19
def main():
    savepath = './save_point'
    filepath = './save_point/keras_example_checkpoint.h5'

    # Extract MNIST dataset
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    train_data = extract_data(train_data_filename, 60000, dense=False)
    train_data = train_data.reshape((60000, NUM_CHANNELS, IMG_SIZE, IMG_SIZE))
    train_labels = extract_labels(train_labels_filename, 60000, one_hot=True)
    test_data = extract_data(test_data_filename, 10000, dense=False)
    test_data = test_data.reshape((10000, NUM_CHANNELS, IMG_SIZE, IMG_SIZE))
    test_labels = extract_labels(test_labels_filename, 10000, one_hot=True)

    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE, :]
    validation_set = (validation_data, validation_labels)
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:, ...]

    # Model construction
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='same',
              input_shape=(1, 28, 28)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    # Define optimizer and configure training process
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=["accuracy"])

    model.fit(
        train_data,
        train_labels,
        nb_epoch=NUM_EPOCHS,
        batch_size=1000,
        validation_data=validation_set)

    print('Save model weights')
    if not os.path.isdir(savepath):
        os.mkdir(savepath)
    model.save_weights(filepath, overwrite=True)

    predict = model.predict(test_data, batch_size=1000)

    print('Test err: %.1f%%' % error_rate(predict, test_labels))

    print('Test loss: %.1f%%, accuracy: %.1f%%' %
          tuple(model.evaluate(test_data, test_labels, batch_size=1000)))
def main():
    label_set = 'function'
    df = sentences_df(labels=label_set)
    labels = np.unique(df.label.values)

    out = open(GENERATED_TEXT, 'a')

    for label in labels:
        label_df = df[df.label == label]
        sents = label_df['sentence'].values
        text = '\n'.join(sent for sent in sents)
        print('corpus length:', len(text))

        chars = sorted(list(set(text)))
        print('total chars:', len(chars))
        char_indices = dict((c,i) for i,c in enumerate(chars))
        indices_char = dict((i,c) for i,c in enumerate(chars))

        # cut text into sequences of maxlen chars
        maxlen = 60
        step = 3
        sentences = []
        next_chars = []
        for i in range(0, len(text) - maxlen, step):
            sentences.append(text[i: i + maxlen])
            next_chars.append(text[i + maxlen])
        print('nb sequences:', len(sentences))

        print('Vectorization...')
        X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
        y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
        for i, sentence in enumerate(sentences):
            for t, char in enumerate(sentence):
                X[i, t, char_indices[char]] = 1
            y[i, char_indices[next_chars[i]]] = 1

        print('Build model...')
        model = Sequential()
        model.add(GRU(512, return_sequences=True,
                      input_shape=(maxlen, len(chars)), activation='relu'))
        model.add(Dropout(0.5))
        #model.add(GRU(512, return_sequences=True, activation='relu'))
        #model.add(Dropout(0.5))
        model.add(GRU(512, return_sequences=False, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(len(chars), activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam')
        print_summary(model.layers)

        log('label: %s\n' % label, out)
        train_and_generate(600, X, y, model, text, maxlen, chars,
                           indices_char, char_indices, out)

        model.save_weights('%s.h5' % label)

    out.close()
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #22
def test_loading_weights_by_name_2():
    """
    test loading model weights by name on:
        - both sequential and functional api models
        - different architecture with shared names
    """

    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse

    # sequential model
    model = Sequential()
    model.add(Dense(2, input_shape=(3,), name='rick'))
    model.add(Dense(3, name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model using Functional API
    del(model)
    data = Input(shape=(3,))
    rick = Dense(2, name='rick')(data)
    jerry = Dense(3, name='jerry')(rick)  # add 2 layers (but maintain shapes)
    jessica = Dense(2, name='jessica')(jerry)
    morty = Dense(3, name='morty')(jessica)

    model = Model(inputs=[data], outputs=[morty])
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    model.load_weights(fname, by_name=True)
    os.remove(fname)

    out2 = model.predict(x)
    assert np.max(np.abs(out - out2)) > 1e-05

    rick = model.layers[1].get_weights()
    jerry = model.layers[2].get_weights()
    jessica = model.layers[3].get_weights()
    morty = model.layers[4].get_weights()

    assert_allclose(old_weights[0][0], rick[0], atol=1e-05)
    assert_allclose(old_weights[0][1], rick[1], atol=1e-05)
    assert_allclose(old_weights[1][0], morty[0], atol=1e-05)
    assert_allclose(old_weights[1][1], morty[1], atol=1e-05)
    assert_allclose(np.zeros_like(jerry[1]), jerry[1])  # biases init to 0
    assert_allclose(np.zeros_like(jessica[1]), jessica[1])  # biases init to 0
Example #23
class MNISTClassifier(object):

    def __init__(self, batch_size=128, nb_classes=10):
        self.img_rows, self.img_cols = 28, 28
        self.batch_size = batch_size
        self.nb_classes = nb_classes

    def _loadAndPreprocessTraining(self):
        (self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()

        self.X_train = self.X_train.reshape(self.X_train.shape[0], 1, self.img_rows, self.img_cols).astype('float32') / 255
        self.X_test = self.X_test.reshape(self.X_test.shape[0], 1, self.img_rows, self.img_cols).astype('float32') / 255

        self.Y_train = np_utils.to_categorical(self.y_train, self.nb_classes)
        self.Y_test = np_utils.to_categorical(self.y_test, self.nb_classes)

    def buildNN(self):
        if os.path.isfile("MNISTArchitecture.json"):
            print("  loading architecture")
            self.model = model_from_json(open('MNISTArchitecture.json').read())
        else:
            self.model = Sequential()

            self.model.add(Convolution2D(64, 5, 5, input_shape=(1, self.img_rows, self.img_cols)))
            self.model.add(Activation('relu'))
            self.model.add(MaxPooling2D(pool_size=(2, 2)))

            self.model.add(Flatten())
            self.model.add(Dense(256))
            self.model.add(Activation('relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(self.nb_classes))
            self.model.add(Activation('softmax'))

            mnistArchitecture = self.model.to_json()
            open('MNISTArchitecture.json', 'w').write(mnistArchitecture)
            pass

        self.model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.0000001, momentum=0.8, nesterov=True), metrics=['accuracy'])

        if os.path.isfile("MNISTWeights.h5"):
            print("  loading weights")
            self.model.load_weights('MNISTWeights.h5')

    def train(self, nb_epoch=24):
        self._loadAndPreprocessTraining()
        self.model.fit(self.X_train, self.Y_train, batch_size=self.batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(self.X_test, self.Y_test))
        self.model.save_weights('MNISTWeights.h5', overwrite=True)

        score = self.model.evaluate(self.X_test, self.Y_test, verbose=0)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])

    def predict(self, data):
        data = data.reshape(data.shape[0], 1, 28, 28).astype('float32') / 255
        return self.model.predict_classes(data, batch_size=32)
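
# A minimal usage sketch for the class above (a hypothetical driver, not part
# of the original snippet):
if __name__ == '__main__':
    clf = MNISTClassifier(batch_size=128, nb_classes=10)
    clf.buildNN()            # reuses MNISTArchitecture.json / MNISTWeights.h5 if present
    clf.train(nb_epoch=24)   # fits on MNIST and saves the weights
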
Example #24
    def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Example #25
def train(cwsInfo, cwsData, modelPath, weightPath):

    (initProb, tranProb), (vocab, indexVocab) = cwsInfo
    (X, y) = cwsData

    train_X, test_X, train_y, test_y = train_test_split(X, y, train_size=0.9, random_state=1)

    #train_X = [ [ [],[],...,[] ] ]  train_y = [ [],[],...,[] ]
    train_X = np.array(train_X)
    train_y = np.array(train_y)
    test_X = np.array(test_X)
    test_y = np.array(test_y)

    outputDims = len(corpus_tags)
    Y_train = np_utils.to_categorical(train_y, outputDims)
    Y_test = np_utils.to_categorical(test_y, outputDims)
    batchSize = 128
    vocabSize = len(vocab) + 1
    wordDims = 100
    maxlen = 7
    hiddenDims = 100

    w2vModel = Word2Vec.load('model/sougou.char.model')
    embeddingDim = w2vModel.vector_size
    embeddingUnknown = [0 for i in range(embeddingDim)]
    embeddingWeights = np.zeros((vocabSize + 1, embeddingDim))
    for word, index in vocab.items():
        if word in w2vModel:
            e = w2vModel[word]
        else:
            e = embeddingUnknown
        embeddingWeights[index, :] = e

    #LSTM
    model = Sequential()
    model.add(Embedding(output_dim = embeddingDim, input_dim = vocabSize + 1,
        input_length = maxlen, mask_zero = True, weights = [embeddingWeights]))
    model.add(LSTM(output_dim = hiddenDims, return_sequences = True))
    model.add(LSTM(output_dim = hiddenDims, return_sequences = False))
    model.add(Dropout(0.5))
    model.add(Dense(outputDims))
    model.add(Activation('softmax'))
    model.compile(loss = 'categorical_crossentropy', optimizer = 'adam')

    result = model.fit(train_X, Y_train, batch_size = batchSize,
                    nb_epoch = 20, validation_data = (test_X,Y_test), show_accuracy=True)

    j = model.to_json()
    fd = open(modelPath, 'w')
    fd.write(j)
    fd.close()

    model.save_weights(weightPath)

    return model
Example #26
def cnn():
    train_data, train_label, test_data, test_label = load_data()
    train_data = train_data / 255.0
    test_data = test_data / 255.0

    print(train_data.shape[0], 'samples')
    print(test_data.shape[0], 'test')
    print(train_label)
    
    train_label = np_utils.to_categorical(train_label, 28) # divide into 28 categories
    test_label = np_utils.to_categorical(test_label, 28)

    model = Sequential()

    model.add(Convolution2D(16, 4, 4,
                            border_mode='valid',
                            input_shape=(1, 28, 28)))
    model.add(Activation('relu'))

    model.add(Convolution2D(32, 4, 4, border_mode='full'))
    model.add(Activation('relu'))

    model.add(Convolution2D(32, 4, 4, border_mode='full'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 4, 4, border_mode='full'))
    model.add(Activation('relu'))

    model.add(Convolution2D(64, 4, 4, border_mode='full'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(28))
    model.add(Activation('softmax'))


    model.load_weights('model_weights.h5')
    model.compile(loss='categorical_crossentropy', optimizer='adadelta')

    model.fit(train_data, train_label, batch_size=100, nb_epoch=10, shuffle=True, verbose=1, show_accuracy=True, validation_data=(test_data, test_label))


    json_string = model.to_json()
    open('model_architecture.json', 'w').write(json_string)
    model.save_weights('model_weights.h5')
Example #27
def test_siamese_1():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
def model(labels, data, go_id):
    # set parameters:
    # Embedding
    # Convolution
    nb_conv = 7
    nb_filter = 64
    nb_pool = 2

  
    # Training
    batch_size = 30
    nb_epoch = 12

    train, val, test = train_val_test_split(
        labels, data, batch_size=batch_size)
    train_label, train_data = train

    val_label, val_data = val
    test_label, test_data = test
    test_label_rep = test_label
    
    train_data = train_data.reshape(train_data.shape[0], 1, 500, 20)
    test_data = test_data.reshape(test_data.shape[0], 1, 500, 20)
    val_data = val_data.reshape(val_data.shape[0], 1, 500, 20)
    model = Sequential()
    model.add(Convolution2D(96, nb_conv, 1,
                        border_mode='valid',
                        input_shape=(1, 500, 20)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filter, 3, 1,
                        border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(
        loss='binary_crossentropy', optimizer='adam', class_mode='binary')

    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_data=(val_data, val_label))
    # # Loading saved weights
    # print 'Loading weights'
    # model.load_weights(DATA_ROOT + go_id + '.hdf5')
    pred_data = model.predict_classes(test_data, batch_size=batch_size)
    # Saving the model
    print('Saving the model for ' + go_id)
    model.save_weights(DATA_ROOT + go_id + '.hdf5', overwrite=True)
    return classification_report(list(test_label_rep), pred_data)
Example #29
def run(filename=filename,
        batch_size=50,
        layer_num=2,
        seq_length=50,
        hidden_dim=500,
        generate_length=500,
        epochs=20,
        mode='train',
        weights='',
        maxlen=100000):
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Dropout
    from keras.layers.recurrent import LSTM, SimpleRNN
    from keras.layers.wrappers import TimeDistributed
    print('run filename={}'.format(filename))
    X, y, vocab_size, ix_to_char = load_data(seq_length, filename, maxlen)

    # Creating and compiling the Network
    model = Sequential()
    model.add(LSTM(hidden_dim, input_shape=(None, vocab_size), return_sequences=True))
    for i in range(layer_num - 1):
        model.add(LSTM(hidden_dim, return_sequences=True))
    model.add(TimeDistributed(Dense(vocab_size)))
    model.add(Activation('softmax'))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    # Generate some sample before training to know how bad it is!
    generate_text(model, generate_length, vocab_size, ix_to_char)

    if weights != '':
        model.load_weights(weights)
        epochs = int(weights[weights.rfind('_') + 1:weights.find('.')])
    else:
        epochs = 0

    # Training if there is no trained weights specified
    if mode == 'train' or weights == '':
        while True:
            print('\n\nEpoch: {}\n'.format(epochs))
            model.fit(X, y, batch_size=batch_size, verbose=1, epochs=1)
            epochs += 1
            generate_text(model, generate_length, vocab_size, ix_to_char)
            if epochs % 10 == 0:
                model.save_weights('checkpoint_layer_{}_hidden_{}_epoch_{}.hdf5'.format(layer_num, hidden_dim, epochs))

    # Else, loading the trained weights and performing generation only
    elif weights != '':
        # Loading the trained weights
        model.load_weights(weights)
        generate_text(model, generate_length, vocab_size, ix_to_char)
        print('\n\n')
    else:
        print('\n\nNothing to do!')
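
# A usage sketch implied by the branching above (hypothetical checkpoint name):
#
#   run(mode='train')  # train from scratch, checkpointing every 10 epochs
#   run(mode='generate',
#       weights='checkpoint_layer_2_hidden_500_epoch_20.hdf5')  # generate only
#
# The epoch counter is parsed back out of the '_<epoch>.' part of the file name.
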
Example #30
    def train_NN(self, X, y, lr, bs, af, reuse_weights = False):
        '''
        Input:
            X - data matrix (train_num, feat_num)
            y - target labels matrix (train_num, label_num)
            lr - SGD learning rate
            bs - batch size denominator
            af - activation function
            reuse_weights - reuse previously trained weights

        Output:
            best_clf - best classifier trained with the given hyperparameters
            best_score - CV score of best_clf

        Tunes the neural net with the given hyperparameters.
        '''
        n_samples, n_feat = X.shape

        b_size = int(n_samples/bs)
        hidden_units = 400  #will be changed to use values from hyperparameter list
        cv_folds = 10
        kf = KFold(n_samples, cv_folds, shuffle=False)

        #create neural net
        clf  = Sequential()
        clf.add(Dense(hidden_units, input_dim=n_feat, activation=af))
        clf.add(Dense(self.target_num, activation = 'sigmoid'))
        sgd = SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False)

        if reuse_weights:
            clf.load_weights('nn_weights.h5')
        
        clf.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
        
        score_total = 0 #running total of metric score over all cv runs
        for train_index, test_index in kf:
            X_train, X_test = X[train_index], X[test_index] 
            y_train, y_test = y[train_index], y[test_index]
            
            clf.fit(X_train, y_train, nb_epoch=5, batch_size=b_size, verbose = 0)
            cv_pred = clf.predict(X_test, batch_size = b_size)

            score = eval(self.metric + '(y_test[:,None], cv_pred, "' + self.task + '")')
            score_total += score

        clf.save_weights('nn_weights.h5', overwrite=True)
        
        best_score = score_total/cv_folds
        best_clf = clf

        return best_clf, best_score
print("model3 created.")

model3.summary()


# ## Training runs were done on a Tesla GPU, so the block is commented out here.

# In[14]:



#now train it. Save the weights and history.

start_time = time.time()

h = model3.fit(X_train_train, y_train_train, batch_size = batch_size, epochs = epochs, validation_data = (X_train_val, y_train_val), verbose = 1)

model3.save_weights("model3_weights.h5")
pickle.dump(h.history, open("model3_history.pk","wb"))

end_time = time.time()

print("total training time: ", end_time - start_time)


# In[15]:


#training done, so load the weights and test.
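
#a minimal sketch of that step (assuming model3 is rebuilt with the same
#architecture first; the file names match the ones saved above):

model3.load_weights("model3_weights.h5")
history = pickle.load(open("model3_history.pk", "rb"))
print(history.keys())  #per-epoch loss / val_loss curves saved during training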

Example #32
# Using Theano backend.
# [[ 0.25961322  0.3711614   0.36922538]
#  [ 0.28979987  0.35393432  0.35626581]
#  [ 0.76556748  0.07756034  0.1568722 ]
#  [ 0.24069023  0.39578772  0.36352205]
#  [ 0.7126801   0.1006901   0.18662977]]
# [[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]]
# dan@pavlap:~/kjs10 $


# I should save the model:
# ref:
# https://github.com/transcranial/keras-js#usage
model.save_weights('model.hdf5')
with open('model.json', 'w') as f:
  f.write(model.to_json())
print('model saved as: model.hdf5 and model.json')

# I should run this shell command:
# python encoder.py model.hdf5
#from subprocess import call
#call(["python","encoder.py","model.hdf5"])

# I should create model_weights.buf, model_metadata.json:
import encoder
enc = encoder.Encoder('model.hdf5')
enc.serialize()
enc.save()
'bye'
Example #33
                                                  class_mode="categorical")

hist = model.fit_generator(generator=train_generator,
                           steps_per_epoch=1600 // batch_size,
                           epochs=100,
                           validation_data=test_generator,
                           validation_steps=800 // batch_size)

#the model has been fitted
#steps_per_epoch = run 1600//batch_size iterations in each epoch
#validation_steps = the number of steps used for test_generator
#Originally each class had 492 images; the ImageDataGenerator now produces additional augmented images
#the result is stored in hist so it can be used later

#%% model save
model.save_weights("deneme.h5")
#the resulting weights are saved to deneme.h5.

#%% model evaluation
#visualize the metrics obtained during training.
print(hist.history.keys())  #metrics recorded on the train and validation sets.
plt.plot(hist.history["loss"], label="Train Loss")
plt.plot(hist.history["val_loss"], label="Validation Loss")
plt.legend()
plt.show()
plt.figure()
plt.plot(hist.history["acc"], label="Train acc")
plt.plot(hist.history["val_acc"], label="Validation acc")
plt.legend()
plt.show()
class CardPredictor:
    def __del__(self):
        self.save_traindata()

    def train_model(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(filters=48,
                   kernel_size=(3, 3),
                   input_shape=(32, 32, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(filters=48,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'))
        self.model.add(Dropout(0.20))

        self.model.add(Flatten())
        self.model.add(Dropout(0.25))
        self.model.add(Dense(1000, activation='relu'))
        self.model.add(Dropout(0.25))
        self.model.add(Dense(500, activation='relu'))
        self.model.add(Dropout(0.25))
        self.model.add(Dense(250, activation='relu'))
        self.model.add(Dropout(0.25))
        self.model.add(Dense(10, activation='softmax'))
        print(self.model.summary())
        if os.path.exists("../flower10model1.h5"):
            self.model.load_weights("../flower10model1.h5")
            print("模型加载成功")
        else:
            print('没有模型加载,开始训练新模型')
            x_train, y_train = read.read_images_labels(data_dir=data_dir,
                                                       batch_size=10000)
            x_train_one = x_train * (1. / 255) - 0.5
            self.model.compile(loss='binary_crossentropy',
                               optimizer='adamax',
                               metrics=['accuracy'])
            train_history = self.model.fit(x=x_train_one,
                                           y=y_train,
                                           validation_split=0.2,
                                           epochs=5,
                                           batch_size=128,
                                           verbose=2)

    def save_traindata(self):
        if not os.path.exists("../flower10model1.h5"):
            self.model.save_weights("../flower10model1.h5")

    def predict(self, img):
        if type(img) == type(""):
            img = image_read(img)

        # pic_hight, pic_width = img.shape[:2]
        # if pic_width > MAX_WIDTH:
        #     resize_rate = MAX_WIDTH / pic_width
        #     img = cv2.resize(img, (MAX_WIDTH, int(pic_hight * resize_rate)), interpolation=cv2.INTER_AREA)

        img = img.reshape([1, IMAGE_SIZE, IMAGE_SIZE, 3])
        img = img * (1. / 255) - 0.5

        resp = self.model.predict(img)[0]
        r = self.model.predict_classes(img)[0]

        read_label_dict()
        label = labels[r]
        num = 0
        pre = ''
        for re in resp:
            pre = pre + labels[num] + ':' + str(re) + '\n'
            num += 1
        return label, pre
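
# A minimal usage sketch for the class above (hypothetical image path; IMAGE_SIZE,
# data_dir and the label dictionary come from the surrounding module):
predictor = CardPredictor()
predictor.train_model()  # loads ../flower10model1.h5 if present, otherwise trains
label, report = predictor.predict('test_flower.jpg')
print(label)
print(report)  # per-class scores, one line per label
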
Example #35
class Agent(object):
    def __init__(self, game, weights=None):
        self.model = Sequential()
        self.model.add(
            Dense(32,
                  input_dim=12,
                  activation='relu',
                  kernel_initializer=VarianceScaling(scale=2.0)))
        self.model.add(
            Dense(9,
                  activation='linear',
                  kernel_initializer=VarianceScaling(scale=2.0)))
        self.game = game
        self.game.agent = self
        self.counter = 0
        self.target_model = clone_model(self.model)
        self.target_model.set_weights(self.model.get_weights())
        if weights is not None:
            self.model.load_weights(weights)

    def train(self,
              lr=0.001,
              n_episodes=500,
              max_t=1000,
              epsilon=0.9,
              min_epsilon=0.05,
              batch_size=128,
              discount=0.99,
              max_memory_size=10000,
              action_per_seconds=4,
              epsilon_rate=5e-2,
              C=15):
        sgd = optimizers.SGD(lr=lr)
        self.model.compile(loss=huber_loss, optimizer=sgd)
        self.trainning = True
        self.epsilon = epsilon
        self.min_epsilon = min_epsilon
        self.epsilon_rate = epsilon_rate
        self.X = []
        self.previous_action = 8
        self.batch_size = batch_size
        self.discount = discount
        self.max_t = max_t
        self.max_memory_size = max_memory_size
        self.iteration = 0
        self.game.max_t = self.max_t
        self.reapeat_action = int(self.game.fps / action_per_seconds)
        self.cycle = 0
        self.running_reward = 0
        self.running_score = 0
        self.C = C
        self.best_score = 0
        self.game.game_loop()

    def act(self, reward):
        if not hasattr(self, 'previous_state') or self.game.car.has_reset:
            self.previous_state = self.build_state()

        self.running_score += reward
        self.running_reward += reward
        state = self.build_state()
        if self.counter == 0:
            if np.random.uniform() < self.epsilon and self.trainning:
                action, keys = self.return_action('random')

            else:
                actions_values = self.model.predict(np.array([state]))[0]
                action, keys = self.return_action(actions_values)

            self.counter = (self.counter + 1) % self.reapeat_action

        else:
            self.counter = (self.counter + 1) % self.reapeat_action
            action, keys = self.previous_action_keys

        if self.trainning:
            if self.counter == 0 or self.game.car.has_reset:
                self.append_X(self.running_reward, state)
                self.previous_state = state
                self.previous_action = action
                self.running_reward = 0
                self.counter = 0
                self.iteration += 1

            if self.iteration > self.batch_size and self.iteration % 1 == 0:
                x, y = self.build_batch()
                self.model.fit(x,
                               y,
                               epochs=1,
                               batch_size=self.batch_size,
                               verbose=0)

            if self.cycle % self.C == 0:
                self.target_model.set_weights(self.model.get_weights())

            if self.game.car.has_reset:
                if self.iteration > self.batch_size:
                    self.epsilon = max(
                        self.epsilon +
                        (self.min_epsilon - self.epsilon) * self.epsilon_rate,
                        self.min_epsilon)
                self.cycle += 1
                if self.cycle % self.C == 0:
                    if self.iteration > self.batch_size:
                        print(
                            "Finished cycle %i, score: %.2f, previous_loss: %.2f"
                            % (self.cycle, self.running_score,
                               self.model.history.history['loss'][-1]))
                    else:
                        print("Finished cycle %i, score: %.2f" %
                              (self.cycle, self.running_score))

                if self.running_score > self.best_score:
                    self.best_score = self.running_score
                    self.model.save_weights('agents/best_weights')

                self.running_score = 0

        self.previous_action_keys = (action, keys)
        return keys

    def build_batch(self):
        X = []
        Y = []
        for _ in range(self.batch_size):
            i = np.random.randint(0, len(self.X))
            x = self.X[i]
            X.append(x['state'])
            y = self.model.predict(np.array([x['state']]))[0]
            y[x['action']] = x['reward']
            if not x['terminal']:
                y[x['action']] += self.discount * np.max(
                    self.target_model.predict(np.array([x['next']]))[0])

            Y.append(y)

        return np.array(X), np.array(Y)

    def append_X(self, reward, state):
        x = {}
        x['state'] = self.previous_state
        x['next'] = state
        x['action'] = self.previous_action
        x['reward'] = reward
        x['terminal'] = self.game.car.has_reset
        if not check_nans(list(x.values())):
            if len(self.X) >= self.max_memory_size:
                self.X = self.X[1:]

            self.X.append(x)

    def build_state(self):
        state = []
        for (x, _) in self.game.car.collision_points:
            state.append(x)

        state.append(self.game.car.speed)
        state.append(self.game.car.acc)
        state.append(self.game.car.theta_wheels)
        state.append(self.game.car.lat_speed)

        return state

    def return_action(self, values):
        action_space = {
            pygame.K_UP: False,
            pygame.K_DOWN: False,
            pygame.K_LEFT: False,
            pygame.K_RIGHT: False,
            pygame.K_r: False
        }
        if values == 'random':
            values = [0] * 9
            values[np.random.randint(0, 9)] = 1

        action = np.argmax(values)
        if action in [0, 4, 6]:
            action_space[pygame.K_UP] = True

        if action in [3, 5, 7]:
            action_space[pygame.K_DOWN] = True

        if action in [1, 4, 5]:
            action_space[pygame.K_LEFT] = True

        if action in [2, 6, 7]:
            action_space[pygame.K_RIGHT] = True

        return action, action_space
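
# --- Added sketch (not part of the original Agent): build_batch above calls
# --- predict() once per sample; the same Bellman target update can be batched.
# --- Assumes the replay memory is a list of dicts with the keys used in append_X.
import numpy as np

def build_batch_vectorized(memory, model, target_model, batch_size, discount):
    idx = np.random.randint(0, len(memory), size=batch_size)
    batch = [memory[i] for i in idx]
    states = np.array([b['state'] for b in batch])
    nexts = np.array([b['next'] for b in batch])
    y = model.predict(states)                          # current Q-value estimates
    next_q = target_model.predict(nexts).max(axis=1)   # bootstrap from frozen target
    for j, b in enumerate(batch):
        y[j, b['action']] = b['reward']
        if not b['terminal']:
            y[j, b['action']] += discount * next_q[j]
    return states, y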

#model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print(full_model.summary())

np.random.seed(seed)
# train so that `hist` is defined for the plots below
hist = full_model.fit(X_train, y_train, validation_data=(X_test, y_test),
                      epochs=epochs, batch_size=10, shuffle=True, callbacks=[earlyStopping])
#hist = model.load_weights('./64.15/model.h5');
# Final evaluation of the model
scores = full_model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.legend(['train', 'test'])
plt.title('loss')
plt.savefig("loss7.png", dpi=300, format="png")
plt.figure()
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.legend(['train', 'test'])
plt.title('accuracy')
plt.savefig("accuracy7.png", dpi=300, format="png")
model_json = full_model.to_json()
with open("model7.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
full_model.save_weights("model7.h5")
print("Saved model to disk")

print(train_X.shape)
print(train_Y.shape)

model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=train_X.shape[1:], activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(100, activation='relu', name='FC'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax')) # the last layer has 10 perceptrons because there are 10 classes

model.summary()

model.compile(loss = keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(lr=2.0e-4),
              metrics=['accuracy'])

model.fit(train_X, train_Y,
          batch_size=32,
          verbose=1,
          epochs=3,
          validation_data=(test_X, test_Y))

model.save('./nn_model/pretrained_model.dat')
model.save_weights('./nn_model/pretrained_model_weights.dat')
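
# --- Added sketch: reloading what was just saved. load_model restores the full
# --- model (architecture + weights + optimizer state); load_weights alone would
# --- require rebuilding the architecture first.
from keras.models import load_model
restored = load_model('./nn_model/pretrained_model.dat')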
Exemple #38
0
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

model.save_weights('digitRecognition.h5')


Exemple #39
0
                optimizer='adam',
                metrics=['accuracy'])

# Fit the model
t_model.fit(turn_input_data, turn_output_data, epochs=150, batch_size=2)
# evaluate the model
scores = t_model.evaluate(turn_input_data, turn_output_data)
print("%s: %.2f%%" % (t_model.metrics_names[1], scores[1] * 100))

# serialize model to JSON
model_json = t_model.to_json()
with open("/Users/daniel/Desktop/code/PokerGame/model3.json",
          "w+") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
t_model.save_weights("/Users/daniel/Desktop/code/PokerGame/model3.h5")
print("Saved model to disk")

# load json and create model
json_file = open('/Users/daniel/Desktop/code/PokerGame/model3.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("/Users/daniel/Desktop/code/PokerGame/model3.h5")
print("Loaded model from disk")

# evaluate loaded model on test data
loaded_model.compile(loss='mean_squared_error',
                     optimizer='rmsprop',
                     metrics=['accuracy'])
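
# --- Added check (assumes the arrays above are still in scope): verify that the
# --- JSON + weights round-trip reproduces the original metrics.
score = loaded_model.evaluate(turn_input_data, turn_output_data, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))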
Exemple #40
0
#fully connected neural networks
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(num_labels, activation='softmax'))

# model.summary()

#Compliling the model
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(),
              metrics=['accuracy'])

#Training the model
model.fit(X_train,
          train_y,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(X_test, test_y),
          shuffle=True)

#Saving the  model to  use it later on
fer_json = model.to_json()
with open("fer.json", "w") as json_file:
    json_file.write(fer_json)
model.save_weights("fer.h5")
sgd = SGD(lr=learnrate2, decay=1e-6, momentum=0.9, nesterov=True)
# model2.load_weights('lstm_weights_test.h5')
print('Compiling model2...')
model2.compile(optimizer=sgd,  # pass the configured instance, not the string 'sgd'
               loss='categorical_crossentropy',
               metrics=['accuracy'])

print('Fitting model2...')
model2.fit(X,
           Y,
           verbose=1,
           batch_size=batch_size,
           nb_epoch=epochs,
           validation_split=0.2)
model2.save_weights('lstm_weights_test.h5', overwrite=True)

# ----------------------------- end of LSTM architecture -----------------------------
lstm_f = K.function(
    [model2.layers[0].input, K.learning_phase()], [model2.layers[3].output])

lstmfeat = lstm_f([X, 0])[0]
Y_2 = Y_0[seq_length:]

lstmfeat = np.asarray(lstmfeat)
Y_2 = np.asarray(Y_2)

lin_clf = svm.LinearSVC()
lin_clf.fit(lstmfeat, Y_2)
print(lin_clf)
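
# --- Added sketch: a held-out evaluation of the LSTM-feature + linear-SVM
# --- pipeline (the fit above trains and prints on the same data).
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

f_tr, f_te, y_tr, y_te = train_test_split(lstmfeat, Y_2, test_size=0.2, random_state=0)
held_out_clf = svm.LinearSVC()
held_out_clf.fit(f_tr, y_tr)
print("held-out accuracy: %.4f" % accuracy_score(y_te, held_out_clf.predict(f_te)))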
Exemple #42
0
                                            class_mode='binary')

classifier.fit_generator(training_set,
                         steps_per_epoch=8000,
                         epochs=3,
                         validation_data=test_set,
                         validation_steps=2000)

import numpy as np
from keras.preprocessing import image
#load the image; its dimensions must match what the model was trained on (e.g. 128x128)
test_image = image.load_img('dataset/single_prediction/cat.4053.jpg',
                            target_size=(128, 128))
#turn test_image into a 3D array the model can accept
test_image = image.img_to_array(test_image)
#the model expects a 4D batch, so use numpy to add a batch dimension
test_image = np.expand_dims(test_image, axis=0)
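#note (an assumption about the training generators): if they rescaled inputs by
#1/255, the same scaling should be applied to this single image as well, e.g.:
#test_image = test_image / 255.0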
request = classifier.predict(test_image)
#class_indices tells us which class the labels 0 and 1 represent in train_set
result_represent = training_set.class_indices

if request[0][0] == 1:
    predict = 'Dog'
else:
    predict = 'Cat'

print(predict)

from keras.models import load_model
classifier.save_weights('CatandDogCNN.h5')
Exemple #43
0
def anomaly():
    dataset = pd.read_csv('kd10.csv')
    X = dataset.iloc[:, :-1].values
    Y = dataset.iloc[:, 41].values

    labelencoder_x_1 = LabelEncoder()
    labelencoder_x_2 = LabelEncoder()
    labelencoder_x_3 = LabelEncoder()
    labelencoder_y = LabelEncoder()

    labelencoder_x_1 = labelencoder_x_1.fit(['icmp', 'tcp', 'udp'])
    # note: the original lists omitted commas, so Python's implicit string
    # concatenation collapsed them into a single string
    labelencoder_x_2 = labelencoder_x_2.fit([
        'IRC', 'X11', 'Z39_50', 'aol', 'auth', 'bgp', 'courier', 'csnet_ns',
        'ctf', 'daytime', 'discard', 'domain', 'domain_u', 'echo', 'eco_i',
        'ecr_i', 'efs', 'exec', 'finger', 'ftp', 'ftp_data', 'gopher',
        'harvest', 'hostnames', 'http', 'http_2784', 'http_443', 'http_8001',
        'imap4', 'iso_tsap', 'klogin', 'kshell', 'ldap', 'link', 'login',
        'mtp', 'name', 'netbios_dgm', 'netbios_ns', 'netbios_ssn', 'netstat',
        'nnsp', 'nntp', 'other', 'pm_dump', 'pop_2', 'pop_3', 'printer',
        'private', 'remote_job', 'rje', 'shell', 'smtp', 'sql_net', 'ssh',
        'sunrpc', 'supdup', 'systat', 'telnet', 'tim_i', 'time', 'urp_i',
        'uucp', 'uucp_path', 'vmnet', 'whois'
    ])
    labelencoder_x_3 = labelencoder_x_3.fit([
        'OTH', 'REJ', 'RSTO', 'RSTOS0', 'RSTR', 'S0', 'S1', 'S2', 'S3', 'SF',
        'SH'
    ])
    dataset['normal.'] = dataset['normal.'].replace([
        'back.', 'buffer_overflow.', 'ftp_write.', 'guess_passwd.', 'imap.',
        'ipsweep.', 'land.', 'loadmodule.', 'multihop.', 'neptune.', 'nmap.',
        'perl.', 'phf.', 'pod.', 'portsweep.', 'rootkit.', 'satan.', 'smurf.',
        'spy.', 'teardrop.', 'warezclient.', 'warezmaster.'
    ], 'attack')
    labelencoder_y = labelencoder_y.fit(['attack', 'normal.'])
    X[:, 1] = labelencoder_x_1.fit_transform(X[:, 1])
    X[:, 2] = labelencoder_x_2.fit_transform(X[:, 2])
    X[:, 3] = labelencoder_x_3.fit_transform(X[:, 3])
    y_train = labelencoder_y.fit_transform(Y)
    scaler = Normalizer().fit(X)
    x_train = scaler.transform(X)
    #print(y_train)
    ann = Sequential()
    ann.add(
        Dense(output_dim=200, init='uniform', activation='relu', input_dim=41))
    ann.add(Dense(output_dim=200, init='uniform', activation='relu'))
    ann.add(Dense(output_dim=200, init='uniform', activation='relu'))
    ann.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))
    ann.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])
    ann.summary()
    ann.fit(x_train, y_train, batch_size=100, nb_epoch=1000)
    #y_pred = ann.predict(x_train)
    #print(y_pred)
    model_json = ann.to_json()
    with open("binaryAnn.json", "w") as json_file:
        json_file.write(model_json)
    ann.save_weights("binaryAnn.h5")
    print("Anomaly-based model saved to disk")
from keras.preprocessing.image import ImageDataGenerator
train_gen = ImageDataGenerator(rescale=1. / 255,
                               horizontal_flip=True,
                               shear_range=0.2,
                               zoom_range=0.2)

test_gen = ImageDataGenerator(rescale=1. / 255)

train_data = train_gen.flow_from_directory('data/train',
                                           target_size=(64, 64),
                                           batch_size=5,
                                           color_mode='grayscale',
                                           class_mode='categorical')

test_data = test_gen.flow_from_directory('data/test',
                                         target_size=(64, 64),
                                         batch_size=5,
                                         color_mode='grayscale',
                                         class_mode='categorical')

model.fit_generator(train_data,
                    validation_data=test_data,
                    epochs=10,
                    steps_per_epoch=2599,
                    validation_steps=1002)

json_model = model.to_json()
with open("model-bw.json", "w") as json_file:
    json_file.write(json_model)
model.save_weights('model-bw.h5')
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json 
# serialize model to JSON

model_json = model.to_json()
with open("model/cnn2.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model/cnn2.h5")
print("Saved model to disk")

######################################################################
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(256,256,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
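
# --- Added sketch: the stack above stops at the last Dropout; a typical
# --- classification head to complete it (num_classes is an assumption, not
# --- from the original snippet).
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
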
def run_final_lstm_model(vocab_size, t, embeddings_index, input_length, dim,
                         padded_docs, labels, X_dev, y_dev, X_test, y_test):
    # TODO: add args for parameters, change args.name to something short
    le = LabelEncoder()
    le.fit(labels)
    labels = le.transform(labels)
    labels = np_utils.to_categorical(labels)
    y_test = le.transform(y_test)
    y_test = np_utils.to_categorical(y_test)
    y_dev = le.transform(y_dev)
    y_dev = np_utils.to_categorical(y_dev)
    print("ytest", y_test)

    embedding_matrix = zeros((vocab_size, dim))
    for word, i in t.word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = np.array(embedding_vector, dtype="float32")
    # define model
    dev_predictions = []
    test_predictions = []
    with open("results_final_lstm_{0}.txt".format(args.name),
              "w",
              encoding="utf-8") as outfile:
        if args.avg:
            for i in range(1, 11):
                if args.original:
                    # define model
                    model = Sequential()
                    e = Embedding(vocab_size,
                                  dim,
                                  weights=[embedding_matrix],
                                  input_length=input_length,
                                  trainable=False)
                    model.add(e)
                    model.add(LSTM(128))
                    model.add(Dropout(0.5))
                    if args.gender:
                        model.add(Dense(2, activation='softmax'))
                    else:
                        model.add(Dense(9, activation='softmax'))
                    # compile the model
                    model.compile(optimizer='adam',
                                  loss='categorical_crossentropy',
                                  metrics=['acc'])
                    # summarize the model
                    print(model.summary())
                    # fit the model
                    h = model.fit(padded_docs,
                                  labels,
                                  batch_size=16,
                                  epochs=50,
                                  verbose=0)
                    # evaluate the model
                    loss, accuracy = model.evaluate(X_dev, y_dev, verbose=0)
                    dev_preds = model.predict(X_dev,
                                              batch_size=args.batch_size,
                                              verbose=0)
                    dev_predictions.append(dev_preds)
                    np.save(
                        "lstm_model_{0}_dev_preds_run_{1}.npy".format(
                            args.name, i), dev_preds)
                    test_preds = model.predict(X_test,
                                               batch_size=args.batch_size,
                                               verbose=0)
                    print("test preds", test_preds)
                    test_predictions.append(test_preds)
                    np.save(
                        "lstm_model_{0}_test_preds_run_{1}.npy".format(
                            args.name, i), test_preds)
                    model_json = model.to_json()
                    with open(
                            "lstm_model_{0}_run_{1}.json".format(args.name, i),
                            "w") as json_file:
                        json_file.write(model_json)
                    # serialize weights to HDF5
                    model.save_weights(
                        "lstm_model_{0}_run_{1}_weights.h5".format(
                            args.name, i))
                    print("Saved model to disk")
                    print("Accuracy run {0}: {1}\n".format(i, accuracy))
                    outfile.write("Accuracy run {0}: {1}\n".format(
                        i, accuracy))
                elif args.bilstm:
                    model = Sequential()
                    model.add(
                        Embedding(vocab_size,
                                  dim,
                                  weights=[embedding_matrix],
                                  input_length=input_length,
                                  trainable=False))
                    if args.layers == 1:
                        model.add(Bidirectional(LSTM(64, activation="tanh")))
                        model.add(Dropout(0.3))
                    elif args.layers == 2:
                        model.add(
                            Bidirectional(
                                LSTM(64,
                                     return_sequences=True,
                                     activation="tanh")))
                        model.add(Dropout(0.3))
                        model.add(Bidirectional(LSTM(64, activation="tanh")))
                        model.add(Dropout(0.3))
                    if args.gender:
                        model.add(Dense(2, activation='softmax'))
                        ## or
                        #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
                    else:
                        model.add(Dense(9, activation='softmax'))
                        ## or
                        #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
                    model.compile(loss='categorical_crossentropy',
                                  optimizer="adam",
                                  metrics=["acc"])
                    h = model.fit(padded_docs,
                                  labels,
                                  epochs=30,
                                  batch_size=16)
                    # evaluate the model
                    loss, accuracy = model.evaluate(X_dev, y_dev, verbose=0)
                    dev_preds = model.predict(X_dev, batch_size=16, verbose=0)
                    dev_predictions.append(dev_preds)
                    np.save(
                        "bilstm_model_{0}_dev_preds_run_{1}.npy".format(
                            args.name, i), dev_preds)
                    test_preds = model.predict(X_test,
                                               batch_size=16,
                                               verbose=0)
                    print("test preds", test_preds)
                    test_predictions.append(test_preds)
                    np.save(
                        "bilstm_model_{0}_test_preds_run_{1}.npy".format(
                            args.name, i), test_preds)
                    model_json = model.to_json()
                    with open(
                            "bilstm_model_{0}_run_{1}.json".format(
                                args.name, i), "w") as json_file:
                        json_file.write(model_json)
                    # serialize weights to HDF5
                    model.save_weights(
                        "bilstm_model_{0}_run_{1}_weights.h5".format(
                            args.name, i))
                    print("Saved model to disk")
                    print("Accuracy run {0}: {1}\n".format(i, accuracy))
                    outfile.write("Accuracy run {0}: {1}\n".format(
                        i, accuracy))
                elif args.mlp:
                    model = Sequential()
                    e = Embedding(vocab_size,
                                  dim,
                                  weights=[embedding_matrix],
                                  input_length=input_length,
                                  trainable=False)
                    model.add(e)
                    #model.add(Flatten())
                    model.add(Dense(64, activation='relu'))
                    model.add(Dropout(0.1))
                    model.add(Dense(64, activation='relu'))
                    model.add(Dropout(0.1))
                    model.add(Flatten())
                    if args.gender:
                        model.add(Dense(2, activation='softmax'))
                    else:
                        model.add(Dense(9, activation='softmax'))
                    # compile the model
                    model.compile(optimizer="adam",
                                  loss='categorical_crossentropy',
                                  metrics=['acc'])
                    # summarize the model
                    print(model.summary())
                    # fit the model
                    model.fit(padded_docs,
                              labels,
                              batch_size=32,
                              epochs=15,
                              verbose=0)
                    # evaluate the model
                    loss, accuracy = model.evaluate(X_dev, y_dev, verbose=0)
                    dev_preds = model.predict(X_dev,
                                              batch_size=args.batch_size,
                                              verbose=0)
                    dev_predictions.append(dev_preds)
                    np.save(
                        "mlp_model_{0}_dev_preds_run_{1}.npy".format(
                            args.name, i), dev_preds)
                    test_preds = model.predict(X_test,
                                               batch_size=args.batch_size,
                                               verbose=0)
                    print("test preds", test_preds)
                    test_predictions.append(test_preds)
                    np.save(
                        "mlp_model_{0}_test_preds_run_{1}.npy".format(
                            args.name, i), test_preds)
                    model_json = model.to_json()
                    with open(
                            "mlp_model_{0}_run_{1}.json".format(args.name, i),
                            "w") as json_file:
                        json_file.write(model_json)
                    # serialize weights to HDF5
                    model.save_weights(
                        "mlp_model_{0}_run_{1}_weights.h5".format(
                            args.name, i))
                    print("Saved model to disk")
                    print("Accuracy run {0}: {1}\n".format(i, accuracy))
                    outfile.write("Accuracy run {0}: {1}\n".format(
                        i, accuracy))
                else:
                    model = Sequential()
                    e = Embedding(vocab_size,
                                  dim,
                                  weights=[embedding_matrix],
                                  input_length=input_length,
                                  trainable=False)
                    model.add(e)
                    model.add(LSTM(args.neurons, return_sequences=True))
                    model.add(
                        LSTM(args.neurons, return_sequences=True)
                    )  # returns a sequence of vectors of dimension 32
                    model.add(LSTM(args.neurons)
                              )  # return a single vector of dimension 32
                    model.add(Dropout(args.dropout))
                    if args.gender:
                        model.add(Dense(2, activation='softmax'))
                        ## or
                        #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
                    else:
                        model.add(Dense(9, activation='softmax'))
                        ## or
                        #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
                    # compile the model
                    model.compile(optimizer=args.opt,
                                  loss='categorical_crossentropy',
                                  metrics=['acc'])
                    # summarize the model
                    print(model.summary())
                    # fit the model
                    h = model.fit(padded_docs,
                                  labels,
                                  batch_size=args.batch_size,
                                  epochs=args.epochs,
                                  verbose=0)
                    # evaluate the model
                    loss, accuracy = model.evaluate(X_dev, y_dev, verbose=0)
                    dev_preds = model.predict(X_dev,
                                              batch_size=args.batch_size,
                                              verbose=0)
                    dev_predictions.append(dev_preds)
                    np.save(
                        "lstm_model_{0}_dev_preds_run_{1}.npy".format(
                            args.name, i), dev_preds)
                    test_preds = model.predict(X_test,
                                               batch_size=args.batch_size,
                                               verbose=0)
                    print("test preds", test_preds)
                    test_predictions.append(test_preds)
                    np.save(
                        "lstm_model_{0}_test_preds_run_{1}.npy".format(
                            args.name, i), test_preds)
                    model_json = model.to_json()
                    with open(
                            "lstm_model_{0}_run_{1}.json".format(args.name, i),
                            "w") as json_file:
                        json_file.write(model_json)
                    # serialize weights to HDF5
                    model.save_weights(
                        "lstm_model_{0}_run_{1}_weights.h5".format(
                            args.name, i))
                    print("Saved model to disk")
                    print("Accuracy run {0}: {1}\n".format(i, accuracy))
                    outfile.write("Accuracy run {0}: {1}\n".format(
                        i, accuracy))

            averaged_dev_predictions = [
                list(np.mean(item, axis=0))
                for item in list(zip(*dev_predictions))
            ]
            averaged_test_predictions = [
                list(np.mean(item, axis=0))
                for item in list(zip(*test_predictions))
            ]
            np.save(
                "lstm_model_{0}_averaged_dev_predictions_runs1_10.npy".format(
                    args.name), averaged_dev_predictions)
            np.save(
                "lstm_model_{0}_averaged_test_predictions_runs1_10.npy".format(
                    args.name), averaged_test_predictions)
            print(
                "averaged accuracy",
                accuracy_score(np.argmax(y_test, axis=1),
                               np.argmax(averaged_test_predictions, axis=1)))
        else:
            model = Sequential()
            e = Embedding(vocab_size,
                          dim,
                          weights=[embedding_matrix],
                          input_length=input_length,
                          trainable=False)
            model.add(e)
            model.add(LSTM(args.neurons, return_sequences=True))
            model.add(LSTM(args.neurons, return_sequences=True)
                      )  # returns a sequence of vectors of dimension 32
            model.add(LSTM(
                args.neurons))  # return a single vector of dimension 32
            model.add(Dropout(args.dropout))
            if args.gender:
                model.add(Dense(2, activation='softmax'))
                ## or
                #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
            else:
                model.add(Dense(9, activation='softmax'))
                ## or
                #model.add(TimeDistributed(Dense(vocab_size), activation="softmax"))
            # compile the model
            model.compile(optimizer=args.opt,
                          loss='categorical_crossentropy',
                          metrics=['acc'])
            # summarize the model
            print(model.summary())
            # fit the model
            h = model.fit(padded_docs,
                          labels,
                          batch_size=args.batch_size,
                          epochs=args.epochs,
                          verbose=0)
            # evaluate the model
            loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
            print(
                "Accuracy: {0}, with batch_size {1}, epochs {2}, optimizer {3}, dropout {4}, neurons {5}\n"
                .format(accuracy, args.batch_size, args.epochs, args.opt,
                        args.dropout, args.neurons))
            outfile.write(
                "Accuracy: {0}, with batch_size {1}, epochs {2}, optimizer {3}, dropout {4}, neurons {5}\n"
                .format(accuracy, args.batch_size, args.epochs, args.opt,
                        args.dropout, args.neurons))
Exemple #47
0
                    train_y,
                    epochs=100,
                    batch_size=1,
                    validation_data=(test_X, test_y),
                    verbose=2,
                    shuffle=False,
                    callbacks=[early_stop])
scores = model.evaluate(train_X, train_y, verbose=0, batch_size=1)
print("%s: %.2f%%" % (model.metrics_names, scores))

# serialize model to YAML
model_yaml = model.to_yaml()
with open("model_device_stateful_6.yaml", "w") as yaml_file:
    yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("model_device_stateful_6.h5")
print("Saved model to disk")

# later...

# load YAML and create model
yaml_file = open('model_device_stateful_6.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("model_device_stateful_6.h5")
print("Loaded model from disk")

# make a prediction
yhat = loaded_model.predict(test_X, batch_size=1)
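
# --- Added check (assumes `model` is still in scope): the reloaded model should
# --- reproduce the original model's predictions on the same inputs.
import numpy as np
assert np.allclose(model.predict(test_X, batch_size=1), yhat)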
Exemple #48
0
def main():
    classifier = Sequential()

    # Step 1: Convolution
    '''
        Conv2D(filter,shape,imageShape,activationFunction)
        '''
    classifier.add(
        Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

    # Step 2 : Pooling
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # Step 3 : Flatenning
    classifier.add(Flatten())

    # Step 4 : Full Connection
    classifier.add(Dense(units=128, activation='relu'))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    # Image Pre-Processing
    '''
        Source of Image Processing https://keras.io/preprocessing/image/
        '''

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    training_set = train_datagen.flow_from_directory('dataSets/training_set',
                                                     target_size=(64, 64),
                                                     batch_size=32,
                                                     class_mode='binary')

    test_set = test_datagen.flow_from_directory('dataSets/test_set',
                                                target_size=(64, 64),
                                                batch_size=32,
                                                class_mode='binary')

    classifier.fit_generator(training_set,
                             steps_per_epoch=8000,
                             epochs=25,
                             validation_data=test_set,
                             validation_steps=2000)

    # ------------------------------------
    # Saving Model to Local
    # ------------------------------------

    # serialize model to JSON
    model_json = classifier.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    classifier.save_weights("model.h5")
    print("Saved model to disk")

    # ------------------------------------
    # Loading Model
    # ------------------------------------

    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    print("Loaded model from disk")
    loaded_model.compile(optimizer='adam',
                         loss='binary_crossentropy',
                         metrics=['accuracy'])

    # Testing With Data
    import numpy as np
    from keras.preprocessing import image

    test_image = image.load_img('dataSets/single_prediction/cat_or_dog_1.jpg',
                                target_size=(64, 64))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)

    #result = classifier.predict(test_image)

    result = loaded_model.predict(test_image)

    training_set.class_indices

    if result[0][0] == 1:
        prediction = 'dog'
    else:
        prediction = 'cat'
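
# --- Added conventional entry point (an assumption: the script is run directly):
if __name__ == '__main__':
    main()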
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
print(X_train[0])

embedding_vector_length = 32

model = Sequential()
model.add(Embedding(400, embedding_vector_length, input_length=max_review_length))
model.add(LSTM(64, return_sequences=True))  # input shape is inferred from the Embedding
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(16, return_sequences=False))
model.add(Dense(3, activation='softmax'))  # softmax (not sigmoid) pairs with the categorical loss below

adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=10)

scores = model.evaluate(X_test, y_test, verbose=1)

print("Accuracy: %.2f%%" % (scores[1]*100))

model_json = model.to_json()
with open("model.json", "w") as json_file :
    json_file.write(model_json)

model.save_weights("model.h5")
print("Saved model to disk")

Exemple #50
0
test_datagen = ImageDataGenerator()

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=BATCH_SIZE,
    color_mode="grayscale",
    shuffle=True,
    class_mode='categorical')

test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=BATCH_SIZE,
    color_mode="grayscale",
    shuffle=True,
    class_mode='categorical')

# Fit the model
model.fit_generator(
    generator=train_generator,
    samples_per_epoch=1550,
    nb_epoch=EPOCHS,
    callbacks=callbacks_list,
    validation_data=test_generator,
    nb_val_samples=nb_test_samples // BATCH_SIZE)

# Save the models
model.save_weights(os.path.join(BASEPATH, 'weights', names['weights']))
model.save(os.path.join(BASEPATH, 'model', names['model']))
Exemple #51
0
def build_load_model(trainX, trainY, testX, testY, validX, validY):
    if not os.path.isfile('model/cnn.model.h5') or Retrain:
        model = Sequential()
        feat_dim = numpy.shape(trainX)[-1]
        VERBOSE = 1
        # print(trainX)  # ValueError: Input 0 is incompatible with layer conv2d_1: expected ndim=4, found ndim=2
        # print('type Of trainX',type(trainX))
        # print(trainX.shape[1:])
        # print('type Of trainX', type(trainX.shape[1:]))
        # print('type Of trainX', type(trainX.shape[1]))
        # model.add(Conv1D((4,4),input_shape=(time_step,feat_dim),activation='relu')))#,padding='same',input_shape=trainX.shape[1:]
        model.add(
            Conv1D(4, 4, input_shape=(time_step, feat_dim), activation='relu'))
        # model.add(Activation('relu'))
        # model.add(Conv2D(32,(3,3)))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(pool_size=(2,2)))
        # model.add(Dropout(0.25))

        # model.add(Conv2D(64,(3,3),padding='same'))
        # model.add(Activation('relu'))
        # model.add(Conv2D(64,(3,3)))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(pool_size=(2,2)))
        # model.add(Dropout(0.25))

        # model.add(Flatten())
        # model.add(Dense(512))
        # model.add(Activation('relu'))
        # # model.add(Dropout(0.5))
        # model.add(Dense(num_classes))
        # model.add(Activation('softmax'))
        #
        #
        # opt = keras.optimizers.rmsprop(lr=0.0001,decay=1e-6)
        #
        #
        # model.compile(loss='binary_crossentropy',
        #               optimizer='sgd',
        #               metrics=['accuracy'])
        # ,'precision','recall','fmesaure'
        model.add(MaxPooling1D(2))
        model.add(Conv1D(4, 4, activation='relu'))
        model.add(MaxPooling1D(2))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        print(model.summary())

        callbacks = [EarlyStopping(monitor='val_loss', patience=2, verbose=0)]

        # trainX = trainX.astype('float32')
        # testX = testX.astype('float32')
        # trainX /= 255
        # testX /= 255

        model.fit(trainX,
                  trainY,
                  batch_size=128,
                  nb_epoch=10000,
                  callbacks=callbacks,
                  validation_data=(validX, validY),
                  verbose=VERBOSE)
        model.save('model/cnn.model.h5')
        model.save_weights('model/cnn.weights.h5')
    elif not Retrain and os.path.isfile(
            model_saved_path) and os.path.isfile(model_weights_saved_path):
        model = load_model('model/cnn.model.h5')
        model.load_weights('model/cnn.weights.h5')
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    else:
        model = 'null'
        print('error')

    loss_and_metrics = model.evaluate(testX, testY, batch_size=128, verbose=0)
    predictY = model.predict_classes(testX)
    print(loss_and_metrics)
    return 0
checkpoint = ModelCheckpoint(filepath=modelpath,
                             monitor='val_loss',
                             save_best_only=True,
                             save_weights_only=False,
                             verbose=1)

model.compile(loss='mse', optimizer='adam', metrics=['mse'])
hist = model.fit(x_train,
                 y_train,
                 epochs=10000,
                 callbacks=[checkpoint],
                 validation_split=(0.3))
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=14)

model.save('./model/sample/boston/model_test01.h5')
model.save_weights('./model/sample/boston/test_weight1.h5')

y_predict = model.predict(x_test)
#------------------------------------------------------------------------------------------------------------------#
'''
1. How do we compute PCA?
2. How do we find the max of the data?
3. Distinguishing classification from regression
4.
'''
from sklearn.metrics import mean_squared_error as mse, r2_score


def RMSE(y_test, y_predict):
    return np.sqrt(mse(y_test, y_predict))
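
# --- Added usage sketch for the helpers above (y_test / y_predict come from the fit):
print("RMSE:", RMSE(y_test, y_predict))
print("R^2 :", r2_score(y_test, y_predict))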
Exemple #53
0
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

#model.load_weights(networkFile)

history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=nb_validation_samples //
                              batch_size)
printHistory(history, insulators)
model.save_weights(networkFile)
Exemple #54
0
                                   data_size=data_size,
                                   shingle_size=shingle_dim)
    X_train = 1.0 - X_train / 255.0
    X_train = np.expand_dims(X_train, 1)
    X_train = randangle(X_train)
    Y_train = to_categorical(Y_train, num_authors)
    return (X_train, Y_train)


if True:
    for batch_iter in range(total_iters):
        # [X_train, Y_train] = iam_m.get_train_batch(batch_size*100)
        print("Getting batch number " + str(batch_iter) + ".")
        (X_train, Y_train) = get_pbatch(author_hdf5_file,
                                        author_ids,
                                        data_size=32 * 1000,
                                        shingle_dim=shingle_dim)
        print("Batch iteration " + str(batch_iter) + "/" + str(total_iters) +
              " on " + str(num_authors) + " authors.")
        model.fit(X_train,
                  Y_train,
                  batch_size=batch_size,
                  nb_epoch=1,
                  show_accuracy=True,
                  verbose=1)  #, validation_data=(X_test, Y_test))
        # model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=1, show_accuracy=True, verbose=1, validation_data=(X_train, Y_train))
        if (batch_iter % 20) == 0 and batch_iter != 0:
            model.save_weights('fielnet120-300.hdf5', overwrite=True)

    model.save_weights('fielnet120-300.hdf5', overwrite=True)
Exemple #55
0
m = Sequential()
m.add(Dense(20, input_shape=(d_in, ), activation='relu'))
# m.add(Dropout(0.2))
m.add(Dense(20, activation='relu'))
m.add(Dense(20, activation='relu'))
# m.add(Dropout(0.2))
m.add(Dense(d_out, activation='softmax'))
m.compile(loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])

pred = None

if do_train:
    m.fit(X, Y, validation_split=0.2, batch_size=1024, epochs=50, verbose=1)
    m.save_weights("weights")
    pred = m.predict_proba(X)
    with open("predictions.txt", "w", newline='') as out:
        wr = csv.writer(out, delimiter='\t')
        for row in pred:
            wr.writerow(row)
else:
    m.load_weights("weights")
    pred = pd.read_csv("predictions.txt", sep='\t', dtype=float,
                       header=None).values

show_cols = [
    #"click_weekday",
    #"click_hour",
    #"ln_country_popularity",
    "orig_destination_distance",
Exemple #56
0
#		print(str(i) + ' --> Predicted: ' +  str(y_pred[i]) + " Expected: " + str(y_true[i]))

# --------------------------------------------------------------------
# Draw the confusion matrix
cm = confusion_matrix(y_true, y_pred, labels=range(num_classes))  # sklearn expects (y_true, y_pred)

labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
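
# --- Added sketch (not in the original): a row-normalized confusion matrix reads
# --- better when class counts are imbalanced (each true-class row sums to 1).
import numpy as np
cm_norm = cm.astype('float') / cm.sum(axis=1, keepdims=True)
print(np.round(cm_norm, 2))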

# --------------------------------------------------------------------
# Evaluate the model on the test set
scores = model.evaluate(x_test, y_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

# --------------------------------------------------------------------
# Save the model and the weights
model_json = model.to_json()
with open("./model/model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("./model/model.h5")
print("Saved model to disk")
model.add(Activation('relu'))

# FC Layer 3
model.add(Dense(32))
model.add(Activation('relu'))

# Final FC Layer - just one output - steering angle
model.add(Dense(1))

# Compiling and training the model
model.compile(metrics=['mean_squared_error'],
              optimizer='Nadam',
              loss='mean_squared_error')

model.fit(X_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          verbose=2,
          validation_data=(X_val, y_val))

# Save model architecture and weights
model_json = model.to_json()
with open("./model.json", "w") as json_file:
    json_file.write(model_json)  # to_json() already returns a JSON string

model.save_weights('./model.h5')

# Show summary of model
model.summary()
daily_error = []
rush_hour_error = []

for day in days:
    day_real = predictions[predictions.index.day == day[2]]['Real'].values
    day_pred = predictions[predictions.index.day ==
                           day[2]]['Predictions'].values
    daily_error.append(bm.mean_absolute_percentage_error(day_real, day_pred))

    rush_real = rush_hour_predictions[rush_hour_predictions.index.day ==
                                      day[2]]['Real'].values
    rush_pred = rush_hour_predictions[rush_hour_predictions.index.day ==
                                      day[2]]['Predictions'].values
    rush_hour_error.append(
        bm.mean_absolute_percentage_error(rush_real, rush_pred))

from datetime import date
daily_error = np.array(daily_error).transpose()
rush_hour_error = np.array(rush_hour_error).transpose()
print(daily_error.shape)
indexes = [date(day[0], day[1], day[2]).ctime() for day in days]
data = {'Daily Error': daily_error, 'Rush Hour Error': rush_hour_error}

errors = pd.DataFrame(index=indexes, data=data)
errors.index.name = 'Date'

#saving everything
regressor.save_weights(PLACE + "_weights.h5")
errors.to_csv(PLACE + "_Daily_Errors.csv")
predictions.to_csv(PLACE + "_Estimations.csv")
print(PLACE)
Exemple #59
0
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=32,
                    validation_split=0.2)

import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Save weights to a TensorFlow Checkpoint file
model.save_weights('./my_model_sgsp3')

# Restore the model's state,
# this requires a model with the same architecture.
model.load_weights('my_model_sgsp3')

model.save("CNN.model_sgsp3")
def build_model(X_train, X_test, Y_train, noLSTM, train_labels):
    model = Sequential()
    model.reset_states()
    with codecs.open(rootFolder + "training.csv", 'a') as logfile:
        fieldnames = ['lstms', 'outpts']
        writer = csv.DictWriter(logfile, fieldnames=fieldnames)
        writer.writerow({'lstms': noLSTM[0], 'outpts': noLSTM[1]})
        print(noLSTM[0], " >> ", noLSTM[1])

    # input
    model.add(
        Dense(X_train.shape[1],
              input_dim=(X_train.shape[1]),
              init="uniform",
              activation="sigmoid"))

    # dense
    for _ in range(noLSTM[0]):
        model.add(Dense(noLSTM[1], activation='tanh', use_bias=True))

    model.add(Dropout(0.5))
    model.add(Dense(3))
    model.add(Activation('softmax'))

    #   ['acc', 'loss', 'val_acc', 'val_loss']
    opt = Adam(lr=0.0011, decay=0.001)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    fnametmp = rootFolder + "plot-{}-{}-{}.png".format("Model", noLSTM[0],
                                                       noLSTM[1])
    plot_model(model,
               to_file=fnametmp,
               show_shapes=True,
               show_layer_names=True,
               rankdir='TB')

    epoch_count = 50
    _patience = min(30, max(epoch_count // 5, 20))
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=_patience,
                                   verbose=1,
                                   mode='auto')
    tn = TerminateOnNaN()
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  min_lr=1e-7,
                                  verbose=1)
    checkpoint_path = os.path.join(
        rootFolder,
        "weights.best_{}_{}_{}.hdf5".format("model", noLSTM[0], noLSTM[1]))
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    csv_logger = CSVLogger(rootFolder + 'training.csv', append=True)
    callback_fns = [early_stopping, tn, csv_logger, checkpoint, reduce_lr]
    history = model.fit(X_train,
                        Y_train,
                        batch_size=1,
                        epochs=50,
                        callbacks=callback_fns,
                        validation_split=0.2,
                        shuffle=True)

    fnametmp = rootFolder + "model_{}_{}_{}".format("Model", noLSTM[0],
                                                    noLSTM[1])
    model.save_weights(fnametmp + '.h5')
    with open(fnametmp + '.json', 'w') as f:
        f.write(model.to_json())
    fnametmp = "plot-{}-{}-{}.png".format("model-accuracy", noLSTM[0],
                                          noLSTM[1])
    drawMe(yVal=history.history['acc'],
           xVal=history.history['val_acc'],
           title='model accuracy',
           xlabel='epoch',
           ylabel='accuracy',
           legend=['train', 'test'],
           save=True,
           fileName=fnametmp,
           show=False)
    fnametmp = "plot-{}-{}-{}.png".format("model-loss", noLSTM[0], noLSTM[1])
    drawMe(yVal=history.history['loss'],
           xVal=history.history['val_loss'],
           title='model loss',
           xlabel='epoch',
           ylabel='loss',
           legend=['train', 'test'],
           save=True,
           fileName=fnametmp,
           show=False)
    pred = model.predict(X_test)
    compute_accuracy(pred, train_labels)