def Model(weights_path=None):
    model = Sequential()
    model.add(GaussianNoise(0.01, input_shape=(1, img_rows, img_cols)))
    model.add(Convolution2D(nb_filters[0], nb_conv, nb_conv,
                            border_mode='valid'))
    model.add(Activation('relu'))

    model.add(Convolution2D(nb_filters[1], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters[2], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters[3], nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
def CIFAR_10(img_rows, img_cols, img_channels=3, nb_classes=5, weights_path=None):
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
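A minimal usage sketch for the constructors above (an illustration, not from the source): it assumes the Keras 1.x API used throughout this example, Theano-style channels-first ordering, and hypothetical arrays X_train/y_train prepared elsewhere.

from keras.optimizers import SGD
from keras.utils import np_utils

model = CIFAR_10(img_rows=32, img_cols=32, img_channels=3, nb_classes=5)
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
              metrics=['accuracy'])
# X_train: (n, 3, 32, 32) float32; y_train: integer labels in [0, 5)
model.fit(X_train, np_utils.to_categorical(y_train, 5),
          batch_size=32, nb_epoch=10, validation_split=0.1)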
Example #3
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
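For contrast with the round-trip test above, a short sketch (an illustration, not part of the test) of whole-model versus weights-only persistence:

# save_model/load_model persist architecture + weights + optimizer state,
# so no Sequential() rebuild is needed before restoring.
save_model(model, 'full_model.h5')
restored = load_model('full_model.h5')

# save_weights/load_weights persist only the weight tensors; an
# identically-shaped model must already exist.
model.save_weights('weights_only.h5')
clone = Sequential([Dense(2, input_dim=3), Dense(3)])
clone.load_weights('weights_only.h5')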
Example #4
def initial_num_char_phase1():
    """
    Recognize the number of characters in a binarized image.
    :param bw: binary image
    :return: the loaded model
    """
    # Load the model
    model = Sequential()
    model.add(Convolution2D(4, 5, 5, input_shape=(1, 30, 40), border_mode='valid'))
    model.add(Activation('tanh'))

    model.add(Convolution2D(8, 5, 5, input_shape=(1, 26, 36), border_mode='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.55))

    model.add(Convolution2D(16, 4, 4, input_shape=(1, 11, 16), border_mode='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.60))

    model.add(Flatten())
    model.add(Dense(input_dim=16*4*6, output_dim=256, init='glorot_uniform'))
    model.add(Activation('tanh'))

    model.add(Dense(input_dim=256, output_dim=2, init='glorot_uniform'))
    model.add(Activation('softmax'))

    # Load the weights
    model.load_weights('model/train_len_size1.d5')

    sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode="categorical")

    return model
Example #5
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #6
    def __init__(self, restore=None, session=None, Dropout=Dropout, num_labels=10):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = num_labels

        model = Sequential()

        nb_filters = 64
        layers = [Conv2D(nb_filters, (5, 5), strides=(2, 2), padding="same",
                         input_shape=(28, 28, 1)),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(2, 2), padding="valid"),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(1, 1), padding="valid"),
                  Activation('relu'),
                  Flatten(),
                  Dense(32),
                  Activation('relu'),
                  Dropout(.5),
                  Dense(num_labels)]

        for layer in layers:
            model.add(layer)

        if restore is not None:
            model.load_weights(restore)
        
        self.model = model
Example #7
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #8
def get_conv(input_shape=(48, 48, 1), filename=None):
    model = Sequential()
#    model.add(Lambda(lambda x: (x-np.mean(x))/np.std(x),input_shape=input_shape, output_shape=input_shape))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv1',
                     input_shape=input_shape, padding="same"))
    model.add(Conv2D(32, (3, 3), activation='relu',
                     name='conv2', padding="same"))
    model.add(MaxPooling2D(pool_size=(4, 4)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), name='conv3', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), name='conv4', padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(4, 4)))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, (3, 3), activation="relu",
                     name="dense1"))  # This was Dense(128)
    model.add(Dropout(0.5))
    # This was Dense(1)
    model.add(Conv2D(1, (1, 1), name="dense2", activation="tanh"))
    if filename:
        model.load_weights(filename)
    return model
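Why the last layers are convolutions rather than Dense (a sketch inferred from the "This was Dense(...)" comments above, not code from the source): with no Flatten/Dense layers, the same weights accept larger inputs and emit a spatial score map, the usual trick for sliding-window detection in a single forward pass. big_image below is a hypothetical grayscale array.

patch_model = get_conv(input_shape=(48, 48, 1))      # 1x1 score per 48x48 patch
heatmap_model = get_conv(input_shape=(480, 640, 1))  # same layers, larger canvas
heatmap_model.set_weights(patch_model.get_weights()) # conv weights are size-agnostic
heatmap = heatmap_model.predict(big_image[None, :, :, None])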
Example #9
def neural_net(num_sensors, params, load=''):
    model = Sequential()

    # First layer.
    model.add(Dense(
        params[0], init='lecun_uniform', input_shape=(num_sensors,)
    ))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Second layer.
    model.add(Dense(params[1], init='lecun_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Output layer.
    model.add(Dense(3, init='lecun_uniform'))
    model.add(Activation('linear'))

    rms = RMSprop()
    model.compile(loss='mse', optimizer=rms)

    if load:
        model.load_weights(load)

    return model
Example #10
def part4(weights):
    global X_train, Y_train, X_test, Y_test
    size = 3
    model = Sequential()

    model.add(Convolution2D(32, size, size, border_mode="same", input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model = add_top_layer(model)

    model.load_weights(weights)

    model = copy_freeze_model(model, 4)

    # add top dense layer
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    X_train, Y_train, X_test, Y_test = load_data(10)

    model = train(model, auto=False)
    print("Classification rate %02.5f" % (model.evaluate(X_test, Y_test, show_accuracy=True)[1]))
Example #11
def getmodel():
	nb_classes = 2
	model = Sequential()
	model.add(Convolution2D(32, 3, 3, border_mode='same',
	                        input_shape=(1, RECEP_HEI, RECEP_WEI)))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Convolution2D(32, 3, 3))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Flatten())
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nb_classes))
	model.add(Activation('softmax'))

	sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
	model.load_weights('weights.hdf5')
	model.compile(loss='categorical_crossentropy',
	              optimizer=sgd,
	              metrics=['accuracy'])
	# model.fit(X_train, Y_train, batch_size=32, nb_epoch=1,
          # verbose=1, shuffle = True ,validation_split=0.25)
	return model
Example #12
def vgg_train(weights=None):
    print("Compiling VGG Model...")
    model = Sequential()
    # input: 64x64 images with 3 channels -> (3, 64, 64) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(3,64,64)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.50))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(256,W_regularizer=WeightRegularizer(l1=1e-6,l2=1e-6)))
    model.add(Activation('relu'))
    model.add(Dropout(0.50))

    model.add(Dense(200,W_regularizer=WeightRegularizer(l1=1e-5,l2=1e-5)))
    model.add(Activation('softmax'))
    if weights is not None:
        model.load_weights(weights)
    return model
Example #13
def inference_dense(input_dim, class_num, optimizer='sgd', weights_file=''):
    model = Sequential()
    
    model.add(Dense(2048, input_dim=input_dim))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
#    model.add(Dense(256))
#    model.add(Activation('relu'))
#    model.add(Dropout(0.5))
    
    model.add(Dense(class_num))
    model.add(Activation('softmax'))
    
    if weights_file:
        model.load_weights(weights_file)
#    adadelta = Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)
    if optimizer == 'sgd':
        opt = SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)
    elif optimizer == 'adam':
        opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    elif optimizer == 'adagrad':
        opt = Adagrad(lr=0.01, epsilon=1e-08)
    elif optimizer == 'adadelta':
        opt = Adadelta(lr=1.0, rho=0.95, epsilon=1e-08)
    elif optimizer == 'rmsprop':
        opt = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
    else:
        # avoid a NameError below when an unknown optimizer name is passed
        raise ValueError('unsupported optimizer: %s' % optimizer)
    print('compiling model....')
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    
    return model
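A short usage sketch (hypothetical dimensions and arrays, not from the source):

model = inference_dense(input_dim=4096, class_num=101, optimizer='adam')
model.fit(X_feat, Y_onehot, batch_size=128, nb_epoch=20, validation_split=0.1)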
Example #14
def get_nn_model(token_dict_size):
    _logger.info('Initializing NN model with the following params:')
    _logger.info('Input dimension: %s (token vector size)' % TOKEN_REPRESENTATION_SIZE)
    _logger.info('Hidden dimension: %s' % HIDDEN_LAYER_DIMENSION)
    _logger.info('Output dimension: %s (token dict size)' % token_dict_size)
    _logger.info('Input seq length: %s ' % INPUT_SEQUENCE_LENGTH)
    _logger.info('Output seq length: %s ' % ANSWER_MAX_TOKEN_LENGTH)
    _logger.info('Batch size: %s' % SAMPLES_BATCH_SIZE)

    model = Sequential()
    seq2seq = SimpleSeq2seq(
        input_dim=TOKEN_REPRESENTATION_SIZE,
        input_length=INPUT_SEQUENCE_LENGTH,
        hidden_dim=HIDDEN_LAYER_DIMENSION,
        output_dim=token_dict_size,
        output_length=ANSWER_MAX_TOKEN_LENGTH,
        depth=1
    )

    model.add(seq2seq)
    model.compile(loss='mse', optimizer='rmsprop')

    # use previously saved weights if they exist; saving first would
    # overwrite them with freshly initialized values
    _logger.info('Looking for a model %s' % NN_MODEL_PATH)

    if os.path.isfile(NN_MODEL_PATH):
        _logger.info('Loading previously calculated weights...')
        model.load_weights(NN_MODEL_PATH)
    else:
        model.save_weights(NN_MODEL_PATH)

    _logger.info('Model is built')
    return model
Example #15
    def get_model(self):
        classes = 36
        # data = np.empty((57218, 1, 24, 24), dtype="float32")
        model = Sequential()
        model.add(Convolution2D(4, 5, 5, border_mode='valid', input_shape=(1, 24, 24)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

        model.add(Convolution2D(8, 3, 3, border_mode='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(16, 3, 3, border_mode='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(128, init='normal'))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dense(classes, init='normal'))
        model.add(Activation('softmax'))
        sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode="categorical")
        model.load_weights("eduLogin/captcha/tmp/weights.11-0.05.h5")
        return model
Example #16
  def build(width, height, depth, num_classes, weights_path=None, dropout=True):
    model = Sequential()

    # First set of CONV => RELU => POOL.
    model.add(Convolution2D(20, 5, 5, border_mode='same',
                            input_shape=(height, width, depth)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Second set of CONV => RELU => POOL.
    model.add(Convolution2D(50, 5, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Last set of FC => RELU layers
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation('relu'))
    if dropout:
        model.add(Dropout(.5))

    # softmax classifier
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # If a weights path is supplied (indicating that the model was
    # pre-trained), then load the weights
    if weights_path is not None:
      model.load_weights(weights_path)

    # return the constructed network architecture
    return model
Example #17
def predict_ranking(evalFile, outFile):

    X, qids, pids = load_data(evalFile)
    input_dim = X[0].shape[1]

    assert len(pids[0]) == len(X[0])

    model = Sequential()
    model.add(Dense(64, input_dim=input_dim, init='uniform', activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    weightsFile = '../model/weights.hdf5'
    model.load_weights(weightsFile)

    Y_p = []
    for x in X:
        Y_p.append(model.predict(x))

    f = open(outFile, 'w')

    for n, qid in enumerate(qids):
        tupes = zip(Y_p[n], pids[n])
        sortedTupes = sorted(tupes, key=lambda x: x[0], reverse=True)
        # use a separate name for the rank so it does not shadow the query index
        for rank, (y, pid) in enumerate(sortedTupes):
            f.write('{}\tITER\t{}\t{}\t{}\tSOMEID\n'.format(qid, pid, rank, 1001 - rank))

    f.close()
Example #18
def spatialNet(weights=None):
    model = Sequential()
    leakyrelu = ELU()  # note: despite the variable name, this is an ELU, not a LeakyReLU

    model.add(Convolution3D(30, 10, 17, 17, subsample=(2,2,2), trainable=False, input_shape=(1, 60,32,32)))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(MaxPooling3D(pool_size=(13, 2, 2),trainable=False, strides=(13,2, 2)))

    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3, trainable=False))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), trainable=False))
    model.add(Flatten())


    model.add(Dense(400, trainable=False))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))
    model.add(Dense(50))
    model.add(Activation(leakyrelu))
    model.add(BatchNormalization(mode=2))

    model.add(Dense(1, activation='sigmoid'))
    model.add(BatchNormalization(mode=2))

    if weights:
        model.load_weights(weights)

    return model
Example #19
def home_made_convnet(Xshape, Yshape):
    print('initializing model...')

    model = Sequential()
    model.add(ZeroPadding2D((2, 2), input_shape=(Xshape)))  # 0
    model.add(Convolution2D(64, 5, 5, activation='relu'))  # 1
    model.add(Convolution2D(64, 5, 5, activation='relu'))  # 3
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 4

    model.add(Convolution2D(128, 3, 3, activation='relu'))  # 6
    model.add(Convolution2D(128, 3, 3, activation='relu'))  # 8
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # 9

    model.add(Flatten())  # 31
    model.add(Dense(256, activation='relu'))  # 32
    model.add(Dropout(0.5))  # 33
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(Yshape, activation='softmax'))

    model.load_weights('weights_f.h5')

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #20
File: model.py  Project: blazer82/ai
class Model:
	def __init__(self, init='normal', activation='relu', batch_size=32, lr=1e-2, load=None):
		self.batch_size = batch_size
		self.model = Sequential()

		self.model.add(Convolution1D(8, 1, input_shape=(2, 4), init=init))
		self.model.add(Activation(activation))

		self.model.add(Flatten())

		self.model.add(Dense(8, init=init))
		self.model.add(BatchNormalization())
		self.model.add(Activation(activation))

		self.model.add(Dense(8, init=init))
		self.model.add(BatchNormalization())
		self.model.add(Activation(activation))

		self.model.add(Dense(2, init=init))

		if load != None:
			self.model.load_weights(load)

		self.model.compile(SGD(lr=lr), loss='mse')

	def predict(self, X):
		return self.model.predict(X.reshape((1,) + X.shape))[0]

	def learn(self, X, y):
		return self.model.fit(X, y, nb_epoch=1, batch_size=self.batch_size, shuffle=True, verbose=0)

	def save(self, filename):
		return self.model.save_weights(filename, overwrite=True)
Example #21
def temporalNet(weights=None):
    model = Sequential()

    model.add(Convolution3D(30, 20, 17, 17, activation='relu', trainable=False, subsample=(4,2,2), input_shape=(1, 120,32,32)))


    model.add(MaxPooling3D(pool_size=(13, 2, 2), strides=(13,2, 2), trainable=False))

    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3, activation='relu', trainable=False))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), trainable=False))
    model.add(Flatten())


    model.add(Dense(400, activation='relu', trainable=False))

    model.add(Dense(50, activation='relu'))
    #model.add(Dense(14, activation='relu'))
    model.add(Dense(1, activation='relu'))

    if weights:
        model.load_weights(weights)

    return model
Example #22
def M8(weights_path=None, input_shape=(1, 64, 64), n_output=None):
    model = Sequential()
    model.add(Convolution2D(
        32, 3, 3, border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(n_output))
    model.add(Activation('softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
Example #23
def Colorize(weights_path=None):
    model = Sequential()
    # input: 960-channel 224x224 feature maps -> (960, 224, 224) tensors.
    # this applies 512 convolution filters of size 1x1 each.

    model.add(Convolution2D(512, 1, 1, border_mode='valid',input_shape=(960,224,224)))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())

    model.add(Convolution2D(256, 1, 1, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())

    model.add(Convolution2D(112, 1, 1, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(normalization.BatchNormalization())
   
    print("output shape: ", model.output_shape)
    # softmax
    model.add(Reshape((112, 224*224)))

    print("output_shape after reshape: ", model.output_shape)
    model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #24
def make_model_full(inshape, num_classes, weights_file=None):
    model = Sequential()
    model.add(KL.InputLayer(input_shape=inshape[1:]))
    # model.add(KL.Conv2D(32, (3, 3), padding='same', input_shape=inshape[1:]))
    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))

    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))

    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)

    return model
Example #25
def temporalNet(weights=None):
    model = Sequential()
    #3D convolutional layer with 32x32 optical flow as input
    model.add(Convolution3D(30, 20, 17, 17, subsample=(4,2,2), input_shape=(1, 120,32,32)))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(13, 2, 2), strides=(13,2, 2)))
    model.add(Reshape((60, 4, 4)))


    model.add(Convolution2D(100, 3, 3))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())


    model.add(Dense(400))
    model.add(Activation(LeakyReLU()))
    model.add(Dropout(0.2))
    model.add(Dense(50))
    model.add(Activation(LeakyReLU()))
    model.add(BatchNormalization())
    model.add(Dense(4, activation='softmax'))

    if weights:
        model.load_weights(weights)

    return model
Example #26
def vgg_basic(img_size, weights_path = None, lr = 0.001):
    '''
    INPUT: img_size = size of images to train/ model was trained on
           weights_path = path to get weights of trained model
    OUTPUT: the fitted/unfitted model depending on if a weights path was
            specified

    A basic convolutional neural net. I found this one to have the best results.
    '''
    model = Sequential()

    model.add(ZeroPadding2D((1,1),input_shape=(3, img_size, img_size)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(5, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    adam = Adam(lr = lr)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy')

    return model
Example #27
def create_model(load_weights=False):
    nn = Sequential()
    nn.add(Convolution2D(32, 1, 3, 3, border_mode='same', activation='relu'))
    nn.add(Convolution2D(32, 32, 3, 3, border_mode='same', activation='relu'))
    nn.add(MaxPooling2D(poolsize=(2,2)))
    nn.add(Dropout(0.25))
    
    nn.add(Convolution2D(64, 32, 3, 3, border_mode='same', activation='relu')) 
    nn.add(Convolution2D(64, 64, 3, 3, border_mode='same', activation='relu'))
    nn.add(MaxPooling2D(poolsize=(2,2)))
    nn.add(Dropout(0.25))
    
    nn.add(Flatten())
    nn.add(Dense(64*7*7, 256, activation='relu'))
    nn.add(Dropout(0.5))
    
    nn.add(Dense(256,10, activation='softmax'))

    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    nn.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    if load_weights:
        nn.load_weights('cnn_weights.hdf5')
    
    return nn
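The snippet above uses the pre-1.0 Keras signatures Convolution2D(nb_filter, stack_size, nb_row, nb_col), poolsize= and Dense(input_dim, output_dim). A sketch of the same network in the Keras 2 API (an assumed translation, not the author's code; the 28x28x1 input is inferred from the 64*7*7 Dense input):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

nn = Sequential([
    Conv2D(32, (3, 3), padding='same', activation='relu',
           input_shape=(28, 28, 1)),   # input channel count is now inferred
    Conv2D(32, (3, 3), padding='same', activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(64, (3, 3), padding='same', activation='relu'),
    Conv2D(64, (3, 3), padding='same', activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(256, activation='relu'),     # only the output size is given
    Dropout(0.5),
    Dense(10, activation='softmax'),
])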
Example #28
def model(df, parent_id, go_id):

    # Training
    batch_size = 64
    nb_epoch = 64

    # Split pandas DataFrame
    n = len(df)
    split = 0.8
    m = int(n * split)
    train, test = df[:m], df[m:]


    # train, test = train_test_split(
    #     labels, data, batch_size=batch_size)

    train_label, train_data = train['labels'], train['data']

    if len(train_data) < 100:
        raise Exception("No training data for " + go_id)

    test_label, test_data = test['labels'], test['data']
    test_label_rep = test_label


    train_data = train_data.as_matrix()

    test_data = test_data.as_matrix()
    train_data = numpy.hstack(train_data).reshape(train_data.shape[0], 8000)
    test_data = numpy.hstack(test_data).reshape(test_data.shape[0], 8000)
    shape = numpy.shape(train_data)

    print('X_train shape: ', shape)
    print('X_test shape: ', test_data.shape)
    model = Sequential()
    model.add(Dense(8000, activation='relu', input_dim=8000))
    model.add(Highway())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

    model_path = DATA_ROOT + parent_id + '/' + go_id + '.hdf5'
    checkpointer = ModelCheckpoint(
        filepath=model_path, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=7, verbose=1)

    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_split=0.2,
        callbacks=[checkpointer, earlystopper])

    # Loading saved weights
    print('Loading weights')
    model.load_weights(model_path)
    pred_data = model.predict_classes(
        test_data, batch_size=batch_size)
    return classification_report(list(test_label_rep), pred_data)
Example #29
def logistic_regression(model_folder, layer, dimension, number_of_feature,
                        cost="binary_crossentropy", learning_rate=1e-6, dropout_rate=0.5, nepoch=10, activation="relu"):

    model = Sequential()
    model.add(Dense(dimension, input_dim=number_of_feature, init="uniform", activation=activation))
    model.add(Dropout(dropout_rate))

    for idx in range(0, layer-2, 1):
        model.add(Dense(dimension, input_dim=dimension, init="uniform", activation=activation))
        model.add(Dropout(dropout_rate))

    model.add(Dense(1, init="uniform", activation="sigmoid"))

    optimizer = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-06)
    model.compile(loss=cost, optimizer=optimizer, metrics=['accuracy'])

    filepath_model = get_newest_model(model_folder)
    if filepath_model:
        model.load_weights(filepath_model)

        log("Load weights from {}".format(filepath_model), INFO)
    else:
        log("A new one model, {}".format(model_folder), INFO)

    return model
Example #30
def C3D_Sports1M(weights_path=None):
    model = Sequential()
    # 1st layer group
    model.add(Convolution3D(64, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv1',
                            subsample=(1, 1, 1),
                            input_shape=(3, 16, 112, 112)))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           border_mode='valid', name='pool1'))

    # 2nd layer group
    model.add(Convolution3D(128, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv2',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool2'))

    # 3rd layer group
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool3'))

    # 4th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool4'))

    # 5th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5b',
                            subsample=(1, 1, 1)))
    model.add(ZeroPadding3D(padding=(0, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool5'))
    model.add(Flatten())

    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    if weights_path:
        model.load_weights(weights_path)

    return model
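A minimal inference sketch for the C3D network above (not from the source): it assumes channels-first clips shaped (3, 16, 112, 112) and a hypothetical weights file name.

import numpy as np

model = C3D_Sports1M(weights_path='sports1m_weights.h5')  # hypothetical path
clip = np.zeros((1, 3, 16, 112, 112), dtype='float32')    # one 16-frame clip
probs = model.predict(clip)               # (1, 487) softmax over Sports-1M
top5 = probs[0].argsort()[-5:][::-1]      # indices of the five best classes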
Example #31
auto_encoder = Sequential()  # reconstructed opening of this truncated snippet
auto_encoder.add(Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           input_shape=input_shape))  # (?, 28, 28, 32)
auto_encoder.add(UpSampling2D((2, 2)))  # (?, 28, 28, 32)
auto_encoder.add(Conv2D(15, (3, 3), activation='relu',
                        padding='same'))  # (?, 28, 28, 32)
auto_encoder.add(Conv2D(1, (3, 3), activation='sigmoid',
                        padding='same'))  # (?, 28, 28, 1)
###############################################################

#model compile: optimizer, loss function
auto_encoder.compile(optimizer='sgd', loss='mean_squared_error')

#model summary
auto_encoder.summary()
auto_encoder.load_weights('denoise_epoch_3_5.h5')

#train the model
auto_encoder.fit(x_train_noisy,
                 x_train,
                 epochs=2,
                 batch_size=128,
                 shuffle=True,
                 validation_data=(x_test_noisy, x_test))

#obtain the model predictions in testing dataset
decoded_imgs_noise = auto_encoder.predict(x_test_noisy)
decoded_imgs = auto_encoder.predict(x_test)
#save the model
auto_encoder.save('denoise_epoch_3_7.h5')
n = 10
Example #32
def build_model(args):
    """
    NVIDIA model used
    Image normalization to avoid saturation and make gradients work better.
    Convolution: 5x5, filter: 24, strides: 2x2, activation: RELU
    Convolution: 5x5, filter: 36, strides: 2x2, activation: RELU
    Convolution: 5x5, filter: 48, strides: 2x2, activation: RELU
    Convolution: 3x3, filter: 64, strides: 1x1, activation: RELU
    Convolution: 3x3, filter: 64, strides: 1x1, activation: RELU
    Drop out (0.5)
    Fully connected: neurons: 100, activation:RELU
    Fully connected: neurons: 50, activation: RELU
    Fully connected: neurons: 10, activation: RELU
    Fully connected: neurons: 1 (output)

    The convolution layers are meant to handle feature engineering;
    the fully connected layers predict the steering angle.
    Dropout avoids overfitting.
    """
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=INPUT_SHAPE))
    model.add(
        Conv2D(24,
               5,
               5,
               activation='relu',
               subsample=(2, 2),
               W_regularizer=l2(0.001)))
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    model.add(
        Conv2D(36,
               5,
               5,
               activation='relu',
               subsample=(2, 2),
               W_regularizer=l2(0.001)))
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    model.add(
        Conv2D(48,
               5,
               5,
               activation='relu',
               subsample=(2, 2),
               W_regularizer=l2(0.001)))
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    model.add(Conv2D(64, 3, 3, activation='relu', W_regularizer=l2(0.001)))
    # model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    model.add(Conv2D(64, 3, 3, activation='relu', W_regularizer=l2(0.001)))
    # model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))

    # model.add(Dropout(args.keep_prob))
    model.add(Flatten())
    model.add(Dense(1164, activation='relu', W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu', W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(50, activation='relu', W_regularizer=l2(0.001)))
    model.add(Dropout(0.4))
    model.add(Dense(10, activation='relu', W_regularizer=l2(0.001)))
    model.add(Dropout(0.4))
    model.add(Dense(1))
    model.summary()
    model.load_weights("model-ghosts.h5")
    return model
Example #33
def vgg16_model(img_rows, img_cols, channel=1, num_classes=None):
    """VGG 16 Model for Keras
    Model Schema is based on 
    https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
    ImageNet Pretrained Weights 
    https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view?usp=sharing
    Parameters:
      img_rows, img_cols - resolution of inputs
      channel - 1 for grayscale, 3 for color 
      num_classes - number of categories for our classification task
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(channel, img_rows, img_cols)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add Fully Connected Layer
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    # Loads ImageNet pre-trained data
    model.load_weights(r'D:\Projeto\Datase\vgg16_weights.h5')  # raw string: \v would otherwise be an escape

    # Truncate and replace softmax layer for transfer learning
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    model.add(Dense(num_classes, activation='softmax'))

    # Uncomment below to set the first 10 layers to non-trainable (weights will not be updated)
    #for layer in model.layers[:10]:
    #    layer.trainable = False

    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
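A short usage sketch for the transfer-learning function above (hypothetical data and values, not from the source):

# fine-tune the ImageNet-initialized VGG16 for a 10-class task,
# channels-first 224x224 RGB inputs
model = vgg16_model(img_rows=224, img_cols=224, channel=3, num_classes=10)
model.fit(X_train, Y_train, batch_size=16, nb_epoch=3,
          validation_data=(X_valid, Y_valid))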
Example #34
class QNetwork(tf.keras.Model):

    # This class essentially defines the network architecture.
    # The network should take in state of the world as an input,
    # and output Q values of the actions available to the agent as the output.

    def __init__(self,
                 environment_name,
                 obs_space,
                 action_space,
                 lr,
                 save_weights_path=None):
        # Define your network architecture here. It is also a good idea to define any training operations
        # and optimizers here, initialize your variables, or alternately compile your model here.
        #pdb.set_trace()

        super(QNetwork, self).__init__()
        decay_rate = lr / 10000000
        self.model = Sequential()
        self.save_weights_path = save_weights_path

        #	pdb.set_trace()
        input_dim = obs_space.shape[0]
        if (environment_name == "CartPole-v0"):
            self.model.add(Dense(30, input_dim=input_dim,
                                 activation='tanh'))  #30 or  50
            # self.model.add(Dense(32,activation='relu'))
            self.model.add(Dense(action_space.n))

        else:
            self.model.add(Dense(96, input_dim=input_dim, activation='relu'))
            self.model.add(Dense(96, activation='relu'))
            self.model.add(Dense(96, activation='tanh'))
            self.model.add(Dense(action_space.n))

        adam = optimizers.Adam(lr=lr)
        self.model.compile(optimizer=adam, loss='mean_squared_error')

    def save_model_weights(self, suffix):
        # Helper function to save your model / weights.
        file_path = os.path.join(self.save_weights_path, suffix)
        self.model.save_weights(file_path)

    def load_model(self, model_file):
        # Helper function to load an existing model.
        # e.g.: torch.save(self.model.state_dict(), model_file)

        self.model = tf.keras.models.load_model(
            os.path.join(self.save_weights_path, model_file))

    def load_model_weights(self, weight_file):
        # Helper funciton to load model weights.
        # e.g.: self.model.load_state_dict(torch.load(model_file))
        self.model.load_weights(
            os.path.join(self.save_weights_path, weight_file))

        pass

    def custom_mse_loss(y_true, y_pred):
        ## y_true: actual q_value of the state, chosen action
        ## y_pred: q_values for all the actions for the corresponding state
        ## NOTE: `actions` must be supplied from the enclosing scope when this
        ## loss is used; it indexes the Q-values of the actions actually taken.

        y_out = y_pred[0, actions]
        loss = keras.losses.mean_squared_error(y_true, y_out)

        return loss
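custom_mse_loss above depends on a variable `actions` from outside the function; a self-contained variant of the same idea (a sketch, not the author's code) selects the Q-value of the taken action with a one-hot mask instead:

import tensorflow as tf

def masked_mse(y_true, y_pred, action_one_hot):
    # y_pred: (batch, n_actions) Q-values; y_true: (batch,) TD targets;
    # action_one_hot: (batch, n_actions) one-hot of the action taken
    q_taken = tf.reduce_sum(y_pred * action_one_hot, axis=1)
    return tf.reduce_mean(tf.square(y_true - q_taken))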
Example #35
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam')

#filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
filepath="weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]

model.fit(X, y, epochs=50, batch_size=64, callbacks=callbacks_list)


'''
filename = "FILENAME"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')

int_to_char = dict((i, c) for i, c in enumerate(chars))

start = np.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print("Seed: ")
print("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
for i in range(1500):
    x = np.reshape(pattern, (1, len(pattern), 1))
    x = x / float(vocabulary)
    prediction = model.predict(x, verbose=0)
    index = np.argmax(prediction)
'''
Example #36
model.fit(X_train, Y_train,  # assumed opening of this truncated fit call
          validation_data=(x_val, y_val),
          epochs=4,
          callbacks=[checkpointer],
          batch_size=64)

del data
del texts
del Y_train
del labels

print("Creating average feature vecs for test reviews..")
texts_test = []
for review in test["review"]:
    texts_test.append(review_to_wordlist(review, remove_stopwords=True))

sequences_test = tokenizer.texts_to_sequences(texts_test)

X_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)

model.load_weights('weights.hdf5')
result = model.predict(X_test)
result[result > 0.5] = 1
result[result < 0.5] = 0
result = result.astype(int)
result = np.squeeze(result)
id_t = test["id"]
output = pd.DataFrame(data={"id": id_t, "sentiment": result})
output.to_csv("Word2Vec_CNNVectors_300_pool_epoch4_lemma.csv",
              index=False,
              quoting=3)
Example #37
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation('relu'))

model.add(Dropout(0.75))

model.add(Dense(64))
model.add(BatchNormalization())
model.add(Activation('relu'))

model.add(Dropout(0.5))

model.add(Dense(4))
model.add(Activation('softmax'))

model.load_weights(weights)


def precision(cl, row):
    return row[cl] / sum(row)


def recall(cl, col):
    return col[cl] / sum(col)


def test_cls(cls_name, cls_num, res):
    files = [f for f in glob.glob(data + '/' + cls_name + '/*.bmp')]
    for f in files:
        img = image.load_img(f, target_size=(img_size, img_size))
        img = image.img_to_array(img)
Example #38
class DogBreedClassifier():
    def __init__(self):

        self._faceDetector = HumanFaceDetector()
        self._dogDetector = DogDetector()

        with open('data/dog_names.json', 'r') as f:
            self._dog_names = json.load(f)
        f.close()

        # ResNet-50 model for dog breed classification
        with session.graph.as_default(), session.session.as_default():
            self._dogBreedCNN = Sequential()
            self._dogBreedCNN.add(
                GlobalAveragePooling2D(input_shape=(7, 7, 2048)))
            self._dogBreedCNN.add(Dense(133, activation='softmax'))
            self._dogBreedCNN.load_weights(
                'data/model_weights_best_Resnet50.hdf5')

        # ResNet-50 model for feature extration
        with session.graph.as_default(), session.session.as_default():
            self._featureExtractor = ResNet50(weights='imagenet',
                                              include_top=False)

    # Helper function to generate bottleneck features from CNN
    def _extract_bottleneck_features(self, tensor):
        '''
        INPUT:
        Image tensor

        OUTPUT:
        bottleneck features

        Description:
        Extract bottleneck features
        '''

        return self._featureExtractor.predict(preprocess_input(tensor))

    # Classification of dog breed
    def predict_breed(self, img_path):
        '''
        INPUT:
        img_path    path of image

        OUTPUT:
        dog breed

        Description:
        Takes a path to an image as input and returns the dog breed that is predicted by the model.
        '''

        features = self._extract_bottleneck_features(
            convert_path_to_tensor(img_path))
        # obtain predicted vector
        predicted_vector = self._dogBreedCNN.predict(features)
        # return dog breed that is predicted by the model
        return self._dog_names[np.argmax(predicted_vector)]

    # Algorithm to obtain dog breed from dog images or resembling dog breed from human face
    def classify_dog_breed(self, img_path):
        '''
        INPUT:
        img_path    path of image 

        OUTPUT:
        dog breed

        Description:
        Takes a path to an image and check if a dog or a human face is in the image.
        If True, then, predicts the dog breed (resembling dog breed for human face).
        If neither a dog or human face in the image, return (-1,-1)
        '''

        if self._dogDetector.detect_dog(img_path):
            # image contains a dog
            breed_id = self.predict_breed(img_path)
            isDog = 1

        else:
            faces = self._faceDetector.detect_faces(img_path)
            if len(faces) and faces.shape[0] == 1:
                # image contains at least a human face
                breed_id = self.predict_breed(img_path)
                isDog = 0
            else:
                # Image does not contain a dog nor a human face
                breed_id = -1
                isDog = -1

        return (isDog, breed_id)
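Typical use of the classifier above (a sketch with a hypothetical image path):

clf = DogBreedClassifier()
is_dog, breed = clf.classify_dog_breed('images/sample.jpg')
if is_dog == 1:
    print('Dog detected, predicted breed:', breed)
elif is_dog == 0:
    print('Human face detected, resembling breed:', breed)
else:
    print('Neither a dog nor a human face found')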
Example #39
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

epochs = 50
batch_size = 128
tbCallBack = keras.callbacks.TensorBoard(log_dir='./tensorboard',
                                         histogram_freq=0,
                                         write_graph=True,
                                         write_images=True)

model.load_weights('model.hdf5')

model.fit(train_X,
          train_y,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(test_X, test_y),
          callbacks=[tbCallBack])

scores = model.evaluate(train_X, train_y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

scores = model.evaluate(test_X, test_y)
print("\nTEST %s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example #40
# coding: utf-8
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import gym
env = gym.make('CartPole-v0')

brain = Sequential([
    Dense(16, activation='relu', input_shape=(4,)),
    Dense(2, activation='softmax')
])

brain.load_weights('./best.h5')


def test(env, brain):
    done = False
    score = 0
    state = env.reset()
    while not done:
        # env.render()
        state = np.expand_dims(state, axis=0)
        action = brain.predict(state).argmax()
        state, reward, done, _ = env.step(action)
        score += reward
    return score


scores = []
for i in range(100):
    score = test(env, brain)
    scores.append(score)
Example #41
                                save_best_only=True)
 early_stopping = EarlyStopping(monitor='val_loss', patience=pat)
 opt = Adam(lr=learnrate,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0)
 model.compile(loss="exp_xsec", optimizer=opt)
 model.fit(X_AL,
           y_AL,
           validation_data=(X_train, y_train),
           nb_epoch=epochs,
           batch_size=bat,
           verbose=2,
           callbacks=[history, checkpointer, early_stopping])
 model.load_weights("27_general_gpu_5_" + str(j) + ".hdf5")
 learnrate /= 2
 j = j + 1
 #evaluation
 y_pred = model.predict(X_test)
 print(np.shape(y_pred))
 y_pred = np.exp(y_pred - 14.92)
 print(np.shape(y_pred))
 y_true = np.exp(y_test - 14.92)
 print(np.shape(y_true))
 y_true = np.reshape(y_true, (-1, 1))
 y_true = np.array(y_true)
 y_pred = np.array(y_pred)
 #true vs. prediction figure for the 10k sample test set
 ideal = np.arange(np.min(y_true), np.max(y_true), 0.005)
 fig3 = plt.figure(figsize=(10, 10))
Example #42
    ### Dog race model ###
    # Transfer learning
    print('Loading inception_resnet_v2')
    model_cropped = InceptionResNetV2(weights='imagenet', include_top=False)
    # End layers
    doggo_model = Sequential()
    # Input shape from the output of model_cropped using 224x224 images
    doggo_model.add(GlobalAveragePooling2D(input_shape=(5, 5, 1536)))
    doggo_model.add(Dense(133))
    doggo_model.add(Dropout(0.4))
    doggo_model.add(Dense(133))
    doggo_model.add(Dropout(0.4))
    doggo_model.add(Dense(133, activation='softmax'))
    print('Loading end-layer weights')
    doggo_model.load_weights('models/weights.best.doggo_model.hdf5')

### Human detector
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('models/haarcascade_frontalface_alt.xml')


# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index', methods=['POST'])
def index():

    return render_template('master.html')  #, ids=ids, graphJSON=graphJSON)


@app.route('/predict', methods=['POST'])
Example #43
class Biometric:
	
	def __init__(self, window):
		#Initializing the GUI
		self.window = window
		self.window.wm_title("Face Biometric")
		
		l1 = Label(window, text = "Enter the roll no.")
		l1.grid(row = 0, column = 0)
		
		self.roll = StringVar()
		self.e1 = Entry(window, textvariable = self.roll)
		self.e1.grid(row = 0, column = 2, rowspan = 2)
		
		b1 = Button(window, text = "Create profile", width = 12, command = self.create_directory)
		b1.grid(row = 4, column = 4)
		
		b2 = Button(window, text = "Capture face", width = 12, command = self.capture_face)
		b2.grid(row = 5, column = 4)
		
		b3 = Button(window, text = "Train", width = 12, command = self.train)
		b3.grid(row = 6, column = 4)
		
		b4 = Button(window, text = "load model", width = 12, command = self.load)
		b4.grid(row = 7, column = 4)
		
		b5 = Button(window, text = "Close", width = 12, command = self.window.destroy)
		b5.grid(row = 8, column = 4)
		
		b6 = Button(window, text = "Creator", width = 12, command = self.creator)
		b6.grid(row = 8, column = 1)
		
		b7 = Button(window, text = "Detect", width = 12, command = self.detect)
		b7.grid(row = 8, column = 2)
		
		b8 = Button(window, text = "Save model", width = 12, command = self.save)
		b8.grid(row = 8, column = 3)
		
		self.list1 = Listbox(window, height = 3, width = 25)
		self.list1.grid(row = 6, column = 1, columnspan = 3)
		
		#Neural Network Structure initialization
		# Initializing the CNN
		self.classifier = Sequential()

		# Step 1 - Convolution
		self.classifier.add(Conv2D(32, (3, 3), input_shape = (256, 256, 3), activation = 'relu'))

		# Step 2 - Pooling
		self.classifier.add(MaxPooling2D(pool_size = (2, 2)))

		# Adding a second convolutional layer
		self.classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
		self.classifier.add(MaxPooling2D(pool_size = (2, 2)))

		# Step 3 - Flattening
		self.classifier.add(Flatten())

		# Step 4 - Full connection
		self.classifier.add(Dense(units = 128, activation = 'relu'))
		self.classifier.add(Dropout(0.1))
		self.classifier.add(Dense(units = 128, activation = 'relu'))
		self.classifier.add(Dropout(0.1))
		self.classifier.add(Dense(units = self.compute_files(), activation = 'sigmoid'))

		# Compiling the CNN
		self.classifier.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])

		
		
		
	def create_directory(self):
		os.mkdir('dataset/training_set/' + self.roll.get())
		os.mkdir('dataset/test_set/' + self.roll.get())
		
		
	def capture_face(self):
		video = cv2.VideoCapture(0)
		frame_no = 0
		print("Capturing ")
		while True:
			
			_,frame = video.read()		#1st is the boolean, 2nd array of image

			cv2.imshow("Capturing",frame)
			key = cv2.waitKey(1)
			
			if frame_no == 200:
				break
				
			if frame_no % 10 == 8 or frame_no % 10 == 9:
				cv2.imwrite('dataset/' + '/test_set/' + self.roll.get()+'/' + str(frame_no)+'.jpg', frame)
			else:
				cv2.imwrite('dataset/' + '/training_set/' +self.roll.get()+'/'+str(frame_no)+'.jpg', frame)
			
			frame_no = frame_no + 1

		self.list1.delete(0,END)
		self.list1.insert(END, "Frames captured:"+str(frame_no))
		video.release()
		cv2.destroyAllWindows()
	
	
	def load(self):
		self.classifier.load_weights('weight_catdog.hdf5')
	
	def save(self):
		self.classifier.save_weights('weight_students.hdf5', overwrite = True)
	
	
	def train(self):
		train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)

		test_datagen = ImageDataGenerator(rescale = 1./255)

		training_set = train_datagen.flow_from_directory('dataset/training_set',
														 target_size = (256, 256),
														 batch_size = 32,
														 class_mode = 'sparse')
		# keep the class-name mapping so detect() can translate predictions
		self.categories = training_set.class_indices

		test_set = test_datagen.flow_from_directory('dataset/test_set',
													target_size = (256, 256),
													batch_size = 32,
													class_mode = 'sparse')

		#with tf.device('/CPU:2'):
		self.classifier.fit_generator(training_set,
								 steps_per_epoch = (self.compute_files() * 160) // 32,	# 160 training images per class, batch size 32
								 epochs = 10,
								 validation_data = test_set,
								 validation_steps = (self.compute_files() * 40) // 32)
								 
								 
		self.save()
		
		
		
		
		
		
	def detect(self):
		video = cv2.VideoCapture(0)
		_, test_img = video.read()
		video.release()

		# the captured frame is already an array, so resize it directly
		# instead of re-loading it from disk
		test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)	# OpenCV frames are BGR, training images were RGB
		test_img = cv2.resize(test_img, (256, 256))
		test_img = image.img_to_array(test_img) / 255.0		# match the generators' rescale = 1./255
		test_img = np.expand_dims(test_img, axis = 0)

		result = self.classifier.predict(test_img)
		r = np.argmax(result[0])

		# flow_from_directory assigns class indices in alphanumeric
		# order of the class folders
		categories = sorted(os.listdir('dataset/training_set'))

		self.list1.delete(0, END)
		self.list1.insert(END, categories[int(r)])
	
	
	
	def compute_files(self):
		# one folder per student in the training set gives the number of
		# classes (listing 'dataset' itself would only count
		# 'training_set' and 'test_set')
		return len(os.listdir('dataset/training_set'))
	
	def creator(self):
		self.list1.delete(0,END)
		self.list1.insert(END, "Created by Shubham Banerjee!")
Ejemplo n.º 44
def rnn_train(x_train, y_train, batch_size=128, epochs=10, pretrained=None):

    with open('text_tokenizer.pkl', 'rb') as f:
        tokenizer = pickle.load(f)

    x_train = sequence.pad_sequences(x_train)

    # Split validation data
    x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                      y_train,
                                                      test_size=0.1,
                                                      random_state=6)

    word_index = tokenizer.word_index
    emb = utils.load_embedding(100, pretrain='glove')

    # Define RNN model
    rnn = Sequential()
    rnn.add(
        Embedding(len(word_index) + 1,
                  100,
                  weights=[emb],
                  input_length=x_train.shape[1],
                  trainable=False))
    #rnn.add(Flatten())
    rnn.add(GRU(256, dropout=0.6))
    rnn.add(Dense(256, activation='relu'))
    rnn.add(Dropout(0.2))
    rnn.add(Dense(128, activation='relu'))
    rnn.add(Dropout(0.2))
    rnn.add(Dense(128, activation='relu'))
    rnn.add(Dropout(0.2))
    rnn.add(Dense(64, activation='relu'))
    rnn.add(Dropout(0.2))
    rnn.add(Dense(64, activation='relu'))
    rnn.add(Dropout(0.2))
    rnn.add(Dense(y_train.shape[1], activation='sigmoid'))
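    # f1_score used in compile below is not a built-in Keras metric, so it
    # must be defined (or imported) first; a minimal batch-wise sketch,
    # assuming this matches the original helper, suited to the sigmoid
    # multi-label output:
    from keras import backend as K

    def f1_score(y_true, y_pred):
        # round probabilities to hard 0/1 decisions per label
        tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
        actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
        precision = tp / (predicted_pos + K.epsilon())
        recall = tp / (actual_pos + K.epsilon())
        return 2 * precision * recall / (precision + recall + K.epsilon())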

    # Compile & print model summary
    rnn.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=[f1_score])
    print(rnn.summary())

    model_json = rnn.to_json()
    with open("models/rnn_model.json", "w") as json_file:
        json_file.write(model_json)
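    # To restore later, pair the saved architecture with the checkpointed
    # weights written below (a sketch):
    #   from keras.models import model_from_json
    #   rnn = model_from_json(open('models/rnn_model.json').read())
    #   rnn.load_weights('./models/rnn.h5')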

    plot_model(rnn, to_file='rnn_model.png', show_shapes=True)

    #history = History()
    checkpointer = ModelCheckpoint(filepath="./models/rnn.h5",
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_f1_score',
                                   mode='max')
    earlystopping = EarlyStopping(monitor='val_f1_score',
                                  patience=10,
                                  verbose=1,
                                  mode='max')
    if pretrained is not None:
        rnn.load_weights(pretrained)
        print('Continue Training.')
        rnn.fit(x_train,
                y_train,
                batch_size=batch_size,
                initial_epoch=20,  # assumes the loaded checkpoint stopped after epoch 20
                verbose=1,
                epochs=epochs,
                validation_data=(x_val, y_val),
                callbacks=[earlystopping, checkpointer])

    else:
        # Train model
        print('Start training from scratch.')
        rnn.fit(x_train,
                y_train,
                batch_size=batch_size,
                verbose=1,
                epochs=epochs,
                validation_data=(x_val, y_val),
                callbacks=[earlystopping, checkpointer])
Ejemplo n.º 45
def func():
    """Best performing model so far; adds layers to the webcam
    emotion-recognizer one."""
    global probdislike, problike, count, stop

    model = Sequential()
    model.add(
        Convolution2D(32, (3, 3), padding='valid', input_shape=(48, 48, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, (3, 3), padding='valid', activation='relu'))
    #model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(256, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128, kernel_initializer="lecun_uniform"))
    #model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    #model = model_from_json(open('./models/Face_model_architecture.json').read())
    #model.load_weights('_model_weights.h5')
    model.load_weights('deep_model_weights_binary_best.h5py')
    #optimizer =Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss='binary_crossentropy', optimizer=sgd)

    cascPath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    video_capture = cv2.VideoCapture(0)

    while True:
        # Capture frame-by-frame
        #    sleep(0.8)
        ret, frame = video_capture.read()

        # detect faces
        gray, detected_faces = detect_face(frame)

        face_index = 0

        # predict output
        for face in detected_faces:
            (x, y, w, h) = face
            if w > 100:
                # draw rectangle around face
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # extract features
                extracted_face = extract_face_features(
                    gray, face, (0.075, 0.05))  #(0.075, 0.05)

                # predict smile
                prediction_result = model.predict_classes(
                    extracted_face.reshape(1, 48, 48, 1))
                pn = model.predict(extracted_face.reshape(1, 48, 48, 1))
                # draw extracted face in the top right corner
                frame[face_index * 48:(face_index + 1) * 48,
                      -49:-1, :] = cv2.cvtColor(extracted_face * 255,
                                                cv2.COLOR_GRAY2RGB)

                # annotate main image with a label
                if prediction_result == 1:
                    cv2.putText(frame, "like!!", (x, y), cv2.FONT_ITALIC, 2,
                                155, 10)
                    #print("Like")
                    problike = problike + pn[0][1]
                    probdislike = probdislike + pn[0][0]
                    count = count + 1
                elif prediction_result == 0:
                    cv2.putText(frame, "dislike", (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 2, 155, 10)
                    #print("DisLike")
                    problike = problike + pn[0][1]
                    probdislike = probdislike + pn[0][0]
                    count = count + 1

                # increment counter
                face_index += 1

        # Display the resulting frame
        #cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        if stop == 1:
            video_capture.release()
            while True:
                if stop == 0:
                    video_capture = cv2.VideoCapture(0)
                    break

        # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Ejemplo n.º 46
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

orig_data = 'data/keras_dataset_300k_{}.pkl'   # 0-9
import_batch_dset1 = 'data/import_batch_20170813_{}.pkl'  # 0-9

savefile = 'net/keras_net_v0_2017aug7.h5'
filelist1 = [import_batch_dset1.format(i) for i in range(9)]
filelist2 = [orig_data.format(i) for i in range(10)]
testfile = 'data/import_batch_20170813_9.pkl'  # 0-9

# interleave the two sources so a full pass mixes both datasets
filelist = filelist1[:4] + filelist2[:5] + filelist1[4:] + filelist2[5:]
model.load_weights(savefile)


def train_net(load=savefile):
    '''train_net loads parameters from savefile by default,
    and then trains 100 epochs on multi-file dataset
    specified by filelist'''
    if load:
        try:
            model.load_weights(load)
        except Exception:
            input('WARNING: failed to load weights but will overwrite file {}'
                  .format(savefile))
    else:
        print('WARNING: not saving progress.')
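    # The snippet is truncated here; a minimal sketch of the loop the
    # docstring describes, assuming each pickle in filelist holds an
    # (x, y) pair of arrays:
    import pickle
    for epoch in range(100):
        for fname in filelist:
            with open(fname, 'rb') as f:
                x, y = pickle.load(f)
            model.fit(x, y, batch_size=32, epochs=1, verbose=1)
        model.save_weights(savefile)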
Ejemplo n.º 47
  #train model
  model.fit(resh_x, resh_y, epochs=40)
  #, validation_split=0.2)
  #, callbacks=[early_stopping_monitor])

# build the model before starting the game
createModel()

if TRAIN:
  preTrainModel()
  model.save("savedmodel")
  sys.exit(0)

# else
model.load_weights("savedmodel")

healths = [-1]*2
def printStats(update, attack):
  if update[0] != -1:
    healths[0] = update[0]
  if update[1] != -1:
    healths[1] = update[1]

  dr_att = "missed"
  if update[3] != -1:
    dr_att = update[3]

  h_att = "missed"
  if update[2] != -1:
    h_att = update[2]
Ejemplo n.º 48
model = Sequential()
model.add(
    LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))
for i in range(LAYER_NUM - 1):
    model.add(LSTM(HIDDEN_DIM, return_sequences=True))
model.add(TimeDistributed(Dense(VOCAB_SIZE)))
model.add(Activation('softmax'))
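# TimeDistributed applies the same Dense(VOCAB_SIZE) softmax at every
# timestep, yielding a next-character distribution for each position.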

print("Compiling the network")

model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

print("Generating a sample")
generate_text(model, args['generate_length'], VOCAB_SIZE, ix_to_char)

if WEIGHTS != '':
    print("Loading weights")
    model.load_weights(WEIGHTS)
    nb_epoch = int(WEIGHTS[WEIGHTS.rfind('_') + 1:WEIGHTS.find('.')])
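    # e.g. 'checkpoint_layer_2_hidden_500_epoch_30.hdf5' resumes at epoch 30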
else:
    nb_epoch = 0

# Training if there is no trained weights specified
if args['mode'] == 'train' or WEIGHTS == '':
    while True:
        print('\n\nEpoch: {}\n'.format(nb_epoch))
        model.fit(X, y, batch_size=BATCH_SIZE, verbose=1, epochs=1)
        nb_epoch += 1
        generate_text(model, GENERATE_LENGTH, VOCAB_SIZE, ix_to_char)
        if nb_epoch % 10 == 0:
            model.save_weights(
                'checkpoint_layer_{}_hidden_{}_epoch_{}.hdf5'.format(
                    LAYER_NUM, HIDDEN_DIM, nb_epoch))
Ejemplo n.º 49
width = 224
height = 224

model = Sequential()
model.add(Conv2D(64, (4,4), input_shape=(width,height,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (2,2), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(2, activation='softmax'))

model.load_weights("eighth_try.h5")

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
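# A quick smoke test of the restored weights (a sketch; the sample path is
# hypothetical):
#   img = load_img('samples/example.jpg', target_size=(width, height))
#   x = np.expand_dims(img_to_array(img), axis=0)
#   print(model.predict(x))  # softmax scores over the 2 classes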





def save_img(path, savepath, origimg, typeimg, layeridx):

    img = load_img(path, target_size=(224,224))
    x = img_to_array(img)  # numpy array, shape (224, 224, 3)
    x = x.reshape((1,) + x.shape)  # add a batch dimension for Keras
Ejemplo n.º 50
          kernel_regularizer=keras.regularizers.l2(weight_decay),
          kernel_initializer=he_normal(),
          name='fc2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(
    Dense(10,
          kernel_regularizer=keras.regularizers.l2(weight_decay),
          kernel_initializer=he_normal(),
          name='predictions_cifa10'))
model.add(BatchNormalization())
model.add(Activation('softmax'))

# Load the pretrained VGG19 weights
model.load_weights(filepath, by_name=True)
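# by_name=True loads weights only into layers whose names (and shapes) match
# the VGG19 checkpoint; 'predictions_cifa10' is deliberately renamed so the
# new 10-way head keeps its fresh initialization.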

# -------- Optimizer settings -------- #
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
cbks = [change_lr, tb_cb]

print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True,
                             width_shift_range=0.125,
                             height_shift_range=0.125,
Ejemplo n.º 51
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_split=0.1)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert (loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Ejemplo n.º 52
def Run_CNN(test_dir):
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    IMG_SIZE = 350
    PretrainedVGG = VGG16(weights='imagenet', include_top=False, input_shape=(IMG_SIZE,IMG_SIZE,3))
    model = Sequential()
    model.add(PretrainedVGG)
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(64, activation='relu'))
    model.add(Flatten())
    model.add(Dense(7, activation='softmax'))
    model.load_weights('CNNModel1.h5')
    GenderModel = load_model('GenderDetectionModel.model')




    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.5
    fontColor = (0,0,255)
    lineType = 2
    expressionLabels = ['Neutral','Anger','Disgust','Fear','Happy','Sadness','Surprise']
    GenderLabels = ['Male','Female']


    hogX = cv2.HOGDescriptor()
    cap = cv2.VideoCapture(test_dir)
    frameRate = cap.get(5)
    preds, gpreds = [0]*7, [0,0]
    while 1:
        frameId = cap.get(1)
        ret, frame = cap.read()
        if not ret: break
        if frameId%floor(frameRate): continue
        faces = face_cascade.detectMultiScale(frame, 1.3,5)
        for (x,y,w,h) in faces:
            if w<100 or h<100: continue
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),4)
            roi_gray = frame[y:y+h, x:x+w]
            genderPic = roi_gray.copy()
            roi_gray = cv2.cvtColor(roi_gray,cv2.COLOR_BGR2GRAY)


            roi_gray = cv2.resize(roi_gray, (IMG_SIZE, IMG_SIZE), interpolation = cv2.INTER_AREA)
            roi_gray = np.repeat(roi_gray,3,-1)
            roi_gray = roi_gray.reshape(-1,IMG_SIZE,IMG_SIZE,3)
            prede = np.argmax(model.predict(roi_gray)[0])

            genderPic = genderPic / 255.0
            genderPic = cv2.resize(genderPic, (96, 96), interpolation = cv2.INTER_AREA)
            genderPic = genderPic.reshape(-1,96,96,3)
            predg = np.argmax(GenderModel.predict(genderPic)[0])
            preds[prede]+=1
            gpreds[predg]+=1

            cv2.putText(frame,expressionLabels[prede]+' '+GenderLabels[predg],
                            (x,y-10),font,fontScale,
                            fontColor,lineType)
        cv2.imshow('image',frame)
        k = cv2.waitKey(60) & 0xff
        if k == 27: break

    cap.release()
    cv2.destroyAllWindows()
    totalPreds = sum(preds)
    for i in range(7): print('{}: {} %'.format(expressionLabels[i],round(preds[i]/totalPreds,4)*100))
    mxIndx = np.argmax(np.array(preds))
    ExDict = dict(zip(expressionLabels,list(range(7))))
    Exy_true = ExDict[test_dir.split('\\')[-2]]
    if preds[Exy_true]==preds[mxIndx] and Exy_true!=mxIndx: mxIndx=Exy_true
    print('CNN Winning expression: {} with Accuracy: {} %'.format(expressionLabels[mxIndx],round(preds[mxIndx]/totalPreds,4)*100))

    totalX = sum(gpreds)
    mxIndx2 = np.argmax(np.array(gpreds))
    GenDict = dict(zip(GenderLabels,[0,1]))
    Geny_true = GenDict[test_dir.split('_')[-2]]
    if gpreds[Geny_true]==gpreds[mxIndx2] and Geny_true!=mxIndx2: mxIndx2=Geny_true
    print('Winning Gender: {} with Accuracy: {} %'.format(GenderLabels[mxIndx2],round(gpreds[mxIndx2]/totalX,4)*100))
    return (preds,gpreds)
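# Usage (a sketch; the path itself is hypothetical): the path is parsed for
# ground truth, so the parent folder must name the expression and the
# second-to-last '_' token the gender, e.g.:
#   preds, gpreds = Run_CNN(r'TestVideos\Happy\clip_Male_1.avi')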
Ejemplo n.º 53
if TRAIN_BIG:
    model.fit(x_train, y_train,
              batch_size=batch_size,
              nb_epoch=BIG_epochs,
              verbose=1,
              validation_data=(x_test, y_test))

    print("Evaluating model ..")
    score = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    print("Savin Model weights")
    model.save_weights("new_stockweighs.h5")
    print("Model Weights Saved")

else:
    print("loading weights TRAIN_BIG FLAG set as FALSE")
    model.load_weights('stockweighs.h5')

print("Evaluating Initial Model ...\n")
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print('-' * 30)

print("Parameter Size of Initial Model and Memory Footprint ")
trainable_params = model.count_params()
footprint = trainable_params * 4
print("Memory footprint per Image Feed Forward ~= ",
      footprint / 1024.0 / 1024.0, "Mb")  # 2x Backprop
print('-' * 30)

## Obtaining the output of the last convolutional layer after Flatten (Caruana et al.)
Ejemplo n.º 54
test_dir = os.path.join(imdb_dir, 'test')

labels = []
texts = []

for label_type in ['neg', 'pos']:
    dir_name = os.path.join(test_dir, label_type)
    for fname in sorted(os.listdir(dir_name)):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)

sequences = tokenizer.texts_to_sequences(texts)
x_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(labels)

# And let's load and evaluate the first model:

# In[25]:

model.load_weights('pre_trained_glove_model.h5')
model.evaluate(x_test, y_test)

# We get an appalling test accuracy of 54%. Working with just a handful of training samples is hard!
Ejemplo n.º 55
print(p)
target_names = ['CLASS0(CRACK)', 'CLASS1(PATCH)', 'CLASS2(ROAD)']


print(classification_report(np.argmax(y_test,axis=1),y_pred,target_names=target_names))
# confusion matrix: the diagonal holds the correctly classified counts
print(confusion_matrix(np.argmax(y_test,axis=1),y_pred))

#%% saving weights
fname= "pav_dis_cnn.hdf5"
model.save_weights(fname,overwrite=True)

#%%loading weights
fname="pav_dis_cnn.hdf5"
model.load_weights(fname)


#%%
score = model.evaluate(x_test, y_test, verbose=0)
print('Test score', score)
#print('Test accuracy',score)
predict = model.predict_classes(x_test[1:15])
print(predict)
print(y_test[1:15])

#%%
#from keras.utils.np_utils import to_categorical
#a= np.categorical_crossentropy(x_test, y_pred)
#%%

Ejemplo n.º 56
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = _make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_split=0.1)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test),
                                         1,
                                         max_queue_size=2,
                                         verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50),
                                        1,
                                        max_queue_size=2)
    pred_loss = K.eval(
        K.mean(
            losses.get(model.loss)(K.variable(y_test),
                                   K.variable(prediction))))

    assert (np.isclose(pred_loss, loss))
    assert (np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert (loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Ejemplo n.º 57
             recurrent_activation='sigmoid',
             activity_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(10))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.5))

    model.add(Dense(1))
    opt = Nadam(lr=0.005)  # 0.005, 0.002, 0.001, 0.01
    model.compile(loss='mse', optimizer=opt)

    model.load_weights(MODEL_FILE_PATH)
    predictions = model.predict(test_X)
    score = model.evaluate(test_X, test_Y, verbose=0)
    print('Test loss:', score)  # this is mean_squared_error

    ###########################################################

    df_result = pd.DataFrame(test_Y, columns=['real'])
    df_result['pred'] = pd.Series(predictions.reshape(-1))
    yhat = ta.DEMA(np.array(df_result['pred'].astype(float)),
                   timeperiod=3)  # denoising moving average (DEMA)
    df_result['pred_dn'] = pd.Series(yhat, index=df_result.index)
    df_result['pred_change'] = (df_result['pred_dn'] -
                                df_result['pred_dn'].shift(3))
    df_result['classify'] = df_result['pred_change'].transform(
        classify_trinary)
Ejemplo n.º 58
                           write_graph=True,
                           write_grads=False,
                           write_images=False,
                           embeddings_freq=0,
                           embeddings_layer_names=None,
                           embeddings_metadata=None)
callbacks_list = [checkpoint, early_stopping, tensor_board]
print('Train...')
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=[x_val, y_val],
          callbacks=callbacks_list)

model.load_weights(filepath)
score, acc = model.evaluate(x_test, y_test)
print(score, acc)

# mosei = MOSI()
# embeddings = mosei.embeddings()
# sentiments = mosei.sentiments()
# train_ids = mosei.train()
# valid_ids = mosei.valid()
# embeddings = mosei.embeddings()
# train_set_ids = []
# for vid in train_ids:
#     for sid in embeddings['embeddings'][vid].keys():
#         if embeddings['embeddings'][vid][sid]:
Ejemplo n.º 59
                freq = int(parts[1])
                if freq >= UNAMBIG_FREQ_THRESHOLD:
                    word2tag[parts[0]] = freq

    with open(os.path.join(model_folder, 'm1_chars_postagger_net.config'),
              'rt') as cfg:
        params = json.load(cfg)
        WINDOW = int(params['WINDOW'])
        bits_per_char = int(params['bits_per_char'])
        bits_per_word = int(params['bits_per_word'])
        output_size = int(params['output_size'])
        INVERT = bool(params['invert'])

    model = model_from_json(
        open(os.path.join(model_folder, 'm1_chars_postagger_net.arch')).read())
    model.load_weights(
        os.path.join(model_folder, 'm1_chars_postagger_net.model'))

    rdr_corpus = codecs.open(input_path, 'r', 'utf-8')

    wrt_result = codecs.open(result_path, 'w', 'utf-8')

    winspan = int((WINDOW - 1) / 2)

    processed_sent_count = 0
    total_word_count = 0
    incorrect_pos_count = 0
    incorrect_tagset_count = 0
    sent = []
    good = True
    line_num = 0
    for line0 in rdr_corpus:
Ejemplo n.º 60
    #                         verbose=0, loss='euclidean',
    #                         mean_y_train=mean_y_train, std_y_train=std_y_train)
    # model.fit(X_train, Y_train, batch_size=batch_size, epochs=5,
    #           callbacks=[modeltest_1, modeltest_2])
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=50)

    # Potentially save weights
    # model.save_weights("path", overwrite=True)

    final_time = time.time()
    time_spent_printer(start_time, final_time)

else:
    print('loading model ..')
    # print('loading model from %s' % (folder + filename + ".hdf5"))
    model.load_weights(folder + filename + ".hdf5")

# --

print("Test...")

# Evaluate model
# Dropout approximation for training data:
standard_prob = model.predict(X_train, batch_size=500, verbose=1)
print(
    np.mean(((mean_y_train + std_y_train * np.atleast_2d(Y_train).T) -
             (mean_y_train + std_y_train * standard_prob))**2, 0)**0.5)
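# Per-output RMSE of the dropout-approximated predictions, computed after
# undoing the target standardization.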

# --

# Dropout approximation for test data: