Example #1
def test_cache():
    import theano
    import theano.tensor as T
    from keras.layers.core import Dense
    from theano import pp, function
    from theano import config
    import cPickle as pkl
    # Theano configuration
    config.optimizer = 'fast_run'

    X = T.matrix()
    d = Dense(200, input_dim=1000)
    # d1 = Dense(200, input_dim=1000)
    d.build()
    Y = d(X) + d(X)
    z = d(X)
    Y1 = z + z
    f = function([X], Y)
    f1 = function([X], Y1)
    # print pp(Y)
    # print pp(f.maker.fgraph.outputs[0])
    print theano.printing.debugprint(f)
    print
    print theano.printing.debugprint(f1)
    print
    print theano.printing.debugprint(z)

    pkl.dump(f, open('test.pkl', 'wb'))
    pkl.dump(f1, open('test1.pkl', 'wb'))
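    # A matching load step (sketch, not part of the original snippet): the
    # compiled Theano functions can be unpickled and reused later, e.g.
    #     f_loaded = pkl.load(open('test.pkl', 'rb'))
    # and then called just like f on a (batch, 1000) float32 matrix.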
Example #2
 def build(self):
     # list of embedding layers
     self.question = []
     self.facts = []
     self.memory = []
     self.Ws = []
     self.trainable_weights = []
     for i in range(self.hops):
         q = BagEmbedding(self.input_dim, self.q_nb_words, self.output_dim,
                          1, bow_mode=self.bow_mode,
                          mask_zero=self.mask_zero, dropout=self.dropout)
         q.build()
         f = BagEmbedding(self.input_dim, self.f_nb_words, self.output_dim,
                          self.input_length, bow_mode=self.bow_mode,
                          mask_zero=self.mask_zero, dropout=self.dropout)
         f.build()
         m = BagEmbedding(self.input_dim, self.f_nb_words, self.output_dim,
                          self.input_length, bow_mode=self.bow_mode,
                          mask_zero=self.mask_zero, dropout=self.dropout)
         m.build()
         self.question.append(q)
         self.facts.append(f)
         self.memory.append(m)
         if i == self.hops-1:
             w = Dense(self.output_dim, input_dim=self.output_dim,
                       activation=self.activation)
         else:
             w = Dense(self.output_dim, input_dim=self.output_dim,
                       activation=self.inner_activation)
         w.build()
         self.Ws.append(w)
         for l in (q, f, m, w):
             self.trainable_weights += l.trainable_weights
Example #3
    def build(self):
        self.lstms = []
        for i in range(self.depth):
            if i == 0:
                self.lstms.append(LSTM(self.output_dim, self.init, self.inner_init,
                                  self.forget_bias_init, self.activation,
                                  self.inner_activation, **self._kwargs))
            else:
                self._kwargs['input_dim'] = self.output_dim
                self.lstms.append(LSTM(self.output_dim, self.init, self.inner_init,
                                  self.forget_bias_init, self.activation,
                                  self.inner_activation, **self._kwargs))

        [lstm.build() for lstm in self.lstms]

        # Get a flat list of trainable_weights
        self.trainable_weights = [weights for lstm in self.lstms for weights in
                                  lstm.trainable_weights]

        if self.readout:
            self.readout_layer = Dense(self.readout, input_dim=self.output_dim,
                                       activation='softmax')
            self.readout_layer.build()
            self.trainable_weights.extend(self.readout_layer.trainable_weights)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

        if self.stateful is not None:
            self.states = []
            for lstm in self.lstms:
                self.states.extend(lstm.states)
        else:
            self.states = [None, None] * self.depth
Example #4
def test_value():
    import numpy as np
    import theano
    import theano.tensor as T
    from keras.layers.core import Dense
    from theano import pp, function
    theano.config.compute_test_value = 'warn'

    # compute_test_value catches shape mismatches as soon as the graph is built:
    # if the test value's second dimension did not match Dense's input_dim
    # (e.g. 100 instead of 1000), an error would be reported immediately.
    t_value = np.zeros((500, 1000), dtype=np.float32)
    X = T.matrix()
    X.tag.test_value = t_value
    d = Dense(200, input_dim=1000)
    # d1 = Dense(200, input_dim=1000)
    d.build()
    z = d(X)
    f = function([X], z)
    # turn it off after
    theano.config.compute_test_value = 'off'
Example #5
def _test_optimizer(optimizer, target=0.75):
    x_train, y_train = get_test_data()

    model = Sequential()
    model.add(Dense(10, input_shape=(x_train.shape[1],)))
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    assert history.history['acc'][-1] >= target
    config = optimizers.serialize(optimizer)
    optim = optimizers.deserialize(config)
    new_config = optimizers.serialize(optim)
    new_config['class_name'] = new_config['class_name'].lower()
    assert config == new_config

    # Test constraints.
    model = Sequential()
    dense = Dense(10,
                  input_shape=(x_train.shape[1],),
                  kernel_constraint=lambda x: 0. * x + 1.,
                  bias_constraint=lambda x: 0. * x + 2.,)
    model.add(dense)
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.train_on_batch(x_train[:10], y_train[:10])
    kernel, bias = dense.get_weights()
    assert_allclose(kernel, 1.)
    assert_allclose(bias, 2.)
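# A Keras weight constraint is just a callable applied to a weight tensor after
# each parameter update, which is why the lambdas above pin the kernel at 1.
# and the bias at 2. for the assert_allclose checks. A named equivalent
# (illustrative sketch only, not part of the original test):
from keras.constraints import Constraint


class ClampTo(Constraint):
    """Overwrite every weight with a fixed constant after each update."""

    def __init__(self, value):
        self.value = value

    def __call__(self, w):
        return 0. * w + self.value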
Example #6
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(
    Conv2D(
        64,
        (4, 4),
        padding='same',
        kernel_regularizer=regularizers.l2(reg),
    ))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(num_classes, activation='softmax', name='act_output'))

print(model.summary())

opt = Nadam(lr=0.0002,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            schedule_decay=0.004,
            clipvalue=3)
model.compile(loss={'act_output': 'categorical_crossentropy'},
              optimizer=opt,
              metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=6)
history = model.fit(X_train, {'act_output': train_Y_one_hot},
                    validation_split=0.2,
Example #7
import os

#Loading our single test image: This can be easily adapted to work on the video frames from the robot
img_path = "/home/bxv7657/but/0030030.png"
im2 = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), (224, 224))
im = np.expand_dims(im2, axis=0)

#Using a VGG16 pre-trained on ImageNet
resmo = VGG16(weights=None, include_top=True)

#Freezing all the above layers
for layer in (resmo.layers):
  layer.trainable = False

#Adding a new Dense layer with 10 output nodes for 10 species of butterflies
x = Dense(10, activation='softmax', name='predictions')(resmo.layers[-2].output)

#Redefining a new model with 10 output class nodes
my_model = Model(inputs=resmo.input,outputs=(x))
my_model.compile(optimizer="sgd", loss='categorical_crossentropy',metrics=['accuracy'])

#Loading weight file generated after running train.py
my_model.load_weights('butterflyvgg_weights.h5')
my_model.compile(optimizer="sgd", loss='categorical_crossentropy',metrics=['accuracy'])
preds=my_model.predict(im)

#Printing the name of species from the label.txt file
y_classes = np.argmax(preds)
with open('label.txt') as class_file:
       class_dict = ast.literal_eval(class_file.read())
fig, ax = plt.subplots()
Example #8
def alexnet_model(img_shape=(50, 50, 3), n_classes=2, l2_reg=0.,
    weights=None):

    # Initialize model
    alexnet = Sequential()

    # Layer 1
    alexnet.add(Conv2D(96, (11, 11), input_shape=img_shape,
        padding='same', kernel_regularizer=l2(l2_reg)))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 2
    alexnet.add(Conv2D(256, (5, 5), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 3
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(512, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 4
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))

    # Layer 5
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 6
    alexnet.add(Flatten())
    alexnet.add(Dense(3072))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 7
    alexnet.add(Dense(4096))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 8
    alexnet.add(Dense(n_classes))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('softmax'))

    if weights is not None:
        alexnet.load_weights(weights)

    return alexnet
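# Hypothetical usage of the builder above (input shape, class count and
# regularization strength are illustrative assumptions, not from the source):
#     model = alexnet_model(img_shape=(224, 224, 3), n_classes=10, l2_reg=0.0005)
#     model.compile(loss='categorical_crossentropy', optimizer='adam',
#                   metrics=['accuracy'])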
Example #9
    def pretrain(self, batch_size=200, act='relu', nEpoch = 200):

        # we have to instantiate the model over and over again because we cannot freeze a layer after it's been compiled
        self.weights = []
        for cSpecs in enumerate(zip(self.modelStructure, self.dropOuts), start=1):
            # cSpecs is e.g. (1, (500, 0.8)) where the first element gives how many hidden layers we have right now
            nLayers = cSpecs[0]
            nOut = cSpecs[1][0]
            pDrop = cSpecs[1][1]

            if nLayers == 1:
                # we have input -> hidden -> output
                model = Sequential()

                hiddenLayer = Dense(nOut, activation=act, input_dim=self.nFeatures) # first hidden layer
                dropoutLayer = Dropout(pDrop) # dropout layer
                outputLayer = Dense(1) # output layer (we have only one if nLayers == 0)

                model.add(hiddenLayer)
                model.add(dropoutLayer)
                model.add(outputLayer)

                # fit model
                adam = Adam(lr=0.00001)
                model.compile(loss='mse', optimizer=adam)
                model.fit(self.X_train, self.y_train, batch_size=batch_size, validation_split=0.2,
                            show_accuracy=True, verbose=1, nb_epoch=nEpoch)

                # save weights of hiddenLayer
                self.weights.append(hiddenLayer.get_weights())
                # instantiate a fresh model after grabbing the weights from the first hidden layer
                model = Sequential()

            else:
                # we have input -> hidden -> .. -> hidden -> output (at least two hidden layers)
                # nLayer indicates how many hidden layers we have (counting from 0)
                # first build frozen layers up to the current one
                for cLayer in range(nLayers):
                    nOut_cc = self.modelStructure[cLayer]
                    pDrop_cc = self.dropOuts[cLayer]

                    print "nlayers = ", nLayers
                    print "clayer = ", cLayer
                    print "layers.shape = ", len(self.weights)

                    if cLayer == 0:
                        # build first frozen layer
                        model.add(Dense(nOut_cc, weights=self.weights[cLayer], input_dim=self.nFeatures, activation=act, trainable=False))
                        model.add(Dropout(pDrop_cc))
                    elif cLayer < nLayers-1:
                        # build next frozen layer
                        model.add(Dense(nOut_cc, weights=self.weights[cLayer], activation=act, trainable=False))
                        model.add(Dropout(pDrop_cc))
                    else:
                        # build new hidden layer (which we want to train)
                        # Caveat: Now we have to use nOut # of hidden units and pDrop as dropout parameter
                        # as this is the hidden layer we add to the network
                        hiddenLayer = Dense(nOut, activation=act)
                        dropoutLayer = Dropout(pDrop)
                        outputLayer = Dense(1)

                        model.add(hiddenLayer)
                        model.add(dropoutLayer)
                        model.add(outputLayer)

                        # fit model
                        adam = Adam(lr=0.00001)
                        model.compile(loss='mse', optimizer=adam)
                        model.fit(self.X_train, self.y_train, batch_size=batch_size, validation_split=0.2,
                                    show_accuracy=True, verbose=1, nb_epoch=nEpoch)

                        # save weights of hiddenLayer
                        self.weights.append(hiddenLayer.get_weights())
                        model = Sequential()
        print "Pretraining complete"
Example #10
def simple_gan_generator(nb_units, z, labels, depth_map, tag3d, depth=2):
    n = nb_units
    depth_map_features = sequential([
        conv2d_block(n),
        conv2d_block(2 * n),
    ])(depth_map)

    tag3d_features = sequential([
        conv2d_block(n, subsample=2),
        conv2d_block(2 * n, subsample=2),
    ])(tag3d)

    x = sequential([
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
    ])(concat([z, labels]))

    blur = InBounds(0, 1, clip=True)(Dense(1)(x))

    x = sequential([
        Dense(8 * 4 * 4 * n),
        Activation('relu'),
        BatchNormalization(mode=2),
        Reshape((8 * n, 4, 4)),
    ])(x)

    x = sequential([
        conv2d_block(8 * n, filters=1, depth=1, up=True),  # 4x4 -> 8x8
        conv2d_block(8 * n, depth=depth, up=True),  # 8x8 -> 16x16
    ])(x)

    off_depth_map = sequential([
        conv2d_block(2 * n, depth=depth),
    ])(concat([x, depth_map_features]))

    light = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 -> 64x64
    ])(off_depth_map)

    def get_light(x):
        return sequential([
            conv2d_block(1, filters=1, batchnorm=False),
            GaussianBlur(sigma=4),
            InBounds(0, 1, clip=True),
        ])(x)

    light_sb = get_light(light)
    light_sw = get_light(light)
    light_t = get_light(light)

    background = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, batchnorm=False),
        InBounds(-1, 1, clip=True),
    ])(off_depth_map)

    details = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, depth=1, batchnorm=False),
        InBounds(-1, 1, clip=True)
    ])(concat(tag3d_features, off_depth_map))
    return blur, [light_sb, light_sw, light_t], background, details
Example #11
                conv_layer1 = lstm(conv_layer1)
                layers.append(conv_layer1)
                encoder_size += rnn_size

    classifier = keras.layers.concatenate(inputs=list(layers))

    # --------------------------------------------------------------------------

    # the final classifier decides how the answer will be produced:
    # 1) yes/no
    # 2) the answer is built by copying words from the question
    # 3) the answer text is generated by the network
    # 4) the answer is generated character by character by the network and consists of digits only
    output_dims = 4

    classifier = Dense(encoder_size, activation='sigmoid')(classifier)
    #classifier = Dense(encoder_size//2, activation='relu')(classifier)
    #classifier = Dense(encoder_size//3, activation='relu')(classifier)
    classifier = Dense(output_dims, activation='softmax',
                       name='output')(classifier)

    model = Model(inputs=inputs, outputs=classifier)
    model.compile(loss='categorical_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])
    model.summary()

    with open(arch_filepath, 'w') as f:
        f.write(model.to_json())

    # -------------------------------------------------------------------------
Example #12
def faceRecoModel(input_shape):
    """
        Implementation of the Inception model used for FaceNet
        
        Arguments:
        input_shape -- shape of the images of the dataset
        Returns:
        model -- a Model() instance in Keras
        """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=1, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)

    # Second Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3),
                         strides=(1, 1),
                         data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
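# Hypothetical usage (the channels-first input shape below is an assumption
# inferred from the axis=1 BatchNormalization and the channels_first pooling,
# not stated in the original snippet):
#     FRmodel = faceRecoModel(input_shape=(3, 96, 96))
#     embeddings = FRmodel.predict(face_batch)  # 128-d, L2-normalized per row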
Example #13
def train():

    model = Sequential()

    X_train = np.load(home + '/gabor/numpyFiles/Training Set.npy')
    X_test = np.load(home + '/gabor/numpyFiles/TestSet.npy')
    Y_train = np.load(home + '/gabor/numpyFiles/Training Labels.npy')
    Y_test = np.load(home + '/gabor/numpyFiles/TestSet Labels.npy')

    #X_test = X_test.reshape(-1, 1, 30, 96)
    Y_test = np_utils.to_categorical(Y_test, 447)

    #X_train = X_train.reshape(-1, 1, 30, 96)
    Y_train = np_utils.to_categorical(Y_train, 447)

    print("X_test.shape == {};".format(X_test.shape))
    print("Y_test.shape == {};".format(Y_test.shape))
    print("X_test.shape == {};".format(X_train.shape))
    print("Y_test.shape == {};".format(Y_train.shape))

    nb_hidden_layers = [len(X_train[0]), 700, 500, 300]

    XtrainKeras = X_train
    print 'shape of XTrain Keras is ', XtrainKeras.shape
    YtrainKeras = np_utils.to_categorical(Y_train, nb_classes)
    op1 = RMSprop(lr=0.01, rho=0.5, epsilon=1e-8)

    X_train_tmp = XtrainKeras
    trained_encoders = []

    #XtrainKeras=XwhiteTrain.reshape(-1,1,len(XwhiteTrain),len(XwhiteTrain[0]))
    #YtrainKeras=np_utils.to_categorical(Y_train, nb_classes)

    #XtestKeras=X_test.reshape(-1,1,imageWidth,imageHeight)
    #YtestKeras=np_utils.to_categorical(Y_test, nb_classes)
    #X_train_tmp=XtrainKeras

    for n_in, n_out in zip(nb_hidden_layers[:-1], nb_hidden_layers[1:]):
        print('Pre-training the layer: Input {} -> Output {}'.format(
            n_in, n_out))
        # Create AE and training
        ae = Sequential()
        encoder = containers.Sequential(
            [Dense(n_out, input_dim=n_in, activation='sigmoid')])
        decoder = containers.Sequential(
            [Dense(n_in, input_dim=n_out, activation='sigmoid')])
        ae.add(
            AutoEncoder(encoder=encoder,
                        decoder=decoder,
                        output_reconstruction=False))
        ae.compile(loss='mean_squared_error', optimizer=op1)
        hist = ae.fit(X_train_tmp,
                      X_train_tmp,
                      batch_size=batch_size,
                      nb_epoch=nb_epoch)
        print(hist.history)
        Fname = prefix + 'autoencoder_n_in=' + str(n_in) + '_n_out= ' + str(
            n_out) + '.json'
        weightName = prefix + 'Weights_autoencoder_n_in=' + str(
            n_in) + '_n_out= ' + str(n_out) + '.h5'
        json_string = model.to_json()
        open(Fname, 'w').write(json_string)
        model.save_weights(weightName, overwrite=True)
        # Store trainined weight
        trained_encoders.append(ae.layers[0].encoder)
        # Update training data
        X_train_tmp = ae.predict(X_train_tmp)

    #ae1=Sequential()
    #encoder1=containers.Sequential([Dense(len(XwhiteTrain[0])-200,len(XwhiteTrain[0]),activation='sigmoid')])

    Y_test = np_utils.to_categorical(Y_test, nb_classes)
    #X_test=X_test.reshape(-1,len(X_test[0]))
    print 'shape of X_test  is ', X_test.shape
    print('Fine-tuning')
    sgd = SGD(lr=0.01, momentum=0.5, decay=0., nesterov=False)

    i = 1
    model = Sequential()
    for encoder in trained_encoders:
        model.add(encoder)
    model.add(
        Dense(nb_classes, input_dim=nb_hidden_layers[-1],
              activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    hist = model.fit(XtrainKeras,
                     YtrainKeras,
                     batch_size=batch_size,
                     nb_epoch=nb_epoch,
                     show_accuracy=True,
                     validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    Fname = prefix + '2 FineTuning_model=' + '.json'
    weightName = prefix + 'Fine Tunes Weights_autoencoder_i=' + str(i) + '.h5'
    json_string = model.to_json()
    open(Fname, 'w').write(json_string)
    model.save_weights(weightName, overwrite=True)
Example #14
    model.add(Dropout(0.25))

    model.add(
        Convolution2D(nb_filters * 2,
                      nb_conv,
                      nb_conv,
                      border_mode='valid',
                      input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters * 2, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam')
    hist = model.fit(X_train,
                     Y_train,
                     batch_size=batch_size,
                     nb_epoch=nb_epoch,
                     show_accuracy=True,
                     verbose=1,
                     validation_data=(X_valid, Y_valid))
    Train_Result_Optimizer = hist.history
    Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
Example #15
def Autoencoder(x_train, y_train, x_test, y_test):
    input_shape = (x_train.shape[1],)
    input2 = Input(input_shape)

    encoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod0')(input2)
    encoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod1')(encoded)
    encoded = Dense(10, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod2')(encoded)

    encoded= Dropout({{uniform(0, 1)}})(encoded)
    decoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder1')(encoded)
    decoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder2')(decoded)
    decoded = Dense(x_train.shape[1], activation='linear',
                    kernel_initializer='glorot_uniform',
                    name='decoder3')(decoded)


    model = Model(inputs=input2, outputs=decoded)
    model.summary()

    adam=Adam(lr={{uniform(0.0001, 0.01)}})
    model.compile(loss='mse', metrics=['acc'],
                  optimizer=adam)
    callbacks_list = [
        callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10,
                                restore_best_weights=True),
    ]
    XTraining, XValidation, YTraining, YValidation = train_test_split(x_train, x_train, stratify=y_train,
                                                                      test_size=0.2)  # before model building

    tic = time.time()
    history= model.fit(XTraining, YTraining,
                      batch_size={{choice([32,64, 128,256,512])}},
                      epochs=150,
                      verbose=2,
                      callbacks=callbacks_list,
                      validation_data=(XValidation,YValidation))

    toc = time.time()


    score = np.amin(history.history['val_loss'])
    print('Best validation loss of epoch:', score)


    scores = [history.history['val_loss'][epoch] for epoch in range(len(history.history['loss']))]
    score = min(scores)
    print('Score',score)


    print('Best score',global_config.best_score)




    if global_config.best_score > score:
        global_config.best_score = score
        global_config.best_model = model
        global_config.best_numparameters = model.count_params()
        global_config.best_time = toc - tic



    return {'loss': score, 'status': STATUS_OK, 'n_epochs': len(history.history['loss']), 'n_params': model.count_params(), 'model': global_config.best_model, 'time':toc - tic}
Example #16
X_test = np.arange(0.005, 1.005, 0.01)
y_test = [0 for ii in range(len(X_test))]
for ii in range(0, len(X_test)):
    if np.logical_and(X_test[ii] > 0.2, X_test[ii] < 0.6):
        y_test[ii] = (0, 1)
    else:
        y_test[ii] = (1, 0)

X_train = X_train.reshape(X_train.shape[0], 1)  #had tough time without this,
# y_train=y_train.reshape(y_train.shape[0],1)        #had tough time without this,

X_test = X_test.reshape(X_test.shape[0], 1)  #had tough time without this,
# y_test=y_test.reshape(y_test.shape[0],1)        #had tough time without this,

model = Sequential()
model.add(Dense(1, 500, init='normal', activation='tanh'))

model.add(Dense(500, 500, init='normal', activation='tanh'))
# model.add(Dropout(0.5))
model.add(Dense(500, 500, init='uniform', activation='tanh'))
# model.add(Dropout(0.5))
# model.add(Dense(50, 50, init='uniform', activation='tanh'))
# model.add(Dropout(0.5))

model.add(Dense(500, 2, init='normal', activation='softmax'))
# model.add(Activation('linear'))

sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)

model.fit(X_train, y_train, nb_epoch=5000, batch_size=use_batch_size)
Example #17
	def build(width, height, depth, classes):
		# initialize the model along with the input shape to be
		# "channels last" and the channels dimension itself
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# if we are using "channels first", update the input shape
		# and channels dimension
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# CONV => RELU => POOL
		model.add(Conv2D(64, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(64, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

		model.add(Conv2D(128, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(128, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

		model.add(Conv2D(256, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(256, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(256, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(Conv2D(512, (3, 3), padding="same", input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(4096))
		model.add(Activation("relu"))
		model.add(Dropout(0.5))
		model.add(Dense(4096))
		model.add(Activation("relu"))
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		model.add(Activation("softmax"))

		# return the constructed network architecture
		return model
Example #18
execfile('00_readingInput.py')

''' Import l1,l2 (regularizer) '''
from keras.regularizers import l1,l2

''' set the size of mini-batch and number of epochs'''
batch_size = 16
epochs = 50

''' Import keras to build a DL model '''
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

print('Building a model with regularizer L2')
model_l2 = Sequential()
model_l2.add(Dense(128, input_dim=200, kernel_regularizer=l2(0.005)))
model_l2.add(Activation('relu'))
model_l2.add(Dense(256, kernel_regularizer=l2(0.005)))
model_l2.add(Activation('relu'))
model_l2.add(Dense(5, kernel_regularizer=l2(0.005)))
model_l2.add(Activation('softmax'))

''' Setting optimizer as Adam '''
from keras.optimizers import SGD, Adam, RMSprop, Adagrad
model_l2.compile(loss= 'categorical_crossentropy',
              	optimizer='Adam',
              	metrics=['accuracy'])

'''Fit models and use validation_split=0.1 '''
history_l2 = model_l2.fit(X_train, Y_train,
							batch_size=batch_size,
Example #19
"""
Gets the unique answers and store them in a .sav file
"""

lbl = LabelEncoder()
lbl.fit(answers_train)
nb_classes = len(list(lbl.classes_))
pk.dump(lbl, open('v1/label_encoder_mlp.sav', 'wb'))
"""
Building the MLP VQA model and compiling the model
"""

model = Sequential(name="MLP model")
model.add(
    Dense(num_hidden_units,
          input_dim=word2vec_dim + img_dim,
          kernel_initializer='uniform',
          name="feeding_comined_image_question_vector"))
model.add(Dropout(dropout, name="Dropout_1_0.5"))
for i in range(num_hidden_layers):
    name_d = "MLP_" + str(i + 1) + "_Hidden_layer_size_1000"
    model.add(
        Dense(num_hidden_units, kernel_initializer='uniform', name=name_d))
    name_a = "Activation_" + str(i + 1) + "_tanh"
    model.add(Activation(activation, name=name_a))
    temp = "Dropout_" + str(i + 2) + "_0.5"
    model.add(Dropout(dropout, name=temp))
model.add(
    Dense(nb_classes,
          kernel_initializer='uniform',
          name="MLP_output_layer_size_1000"))
model.add(Activation('softmax', name="softmax_output_Probabilities"))
Example #20
feature = ['F1', 'F2', 'F3', 'F4']  # four input features
label = ['L1']  # one label, i.e. the value to be predicted
data_train = data.loc[range(0, 6)].copy()  # mark rows 0 to 520 of the Excel sheet as the training set

#2 Data preprocessing and labelling
data_mean = data_train.mean()
data_std = data_train.std()
data_train = (data_train - data_mean) / data_std  # standardize the data
x_train = data_train[feature].as_matrix()  # feature data
y_train = data_train[label].as_matrix()  # label data

#3 Build a simple BP (backprop) neural network model
from keras.models import Sequential
from keras.layers.core import Dense, Activation
model = Sequential()  # layered model
model.add(Dense(12, input_dim=4, init='uniform'))  # input layer; Dense is a fully-connected (BP) layer
model.add(Activation('relu'))  # add the activation function
model.add(Dense(1, input_dim=12))  # output layer
model.compile(loss='mean_squared_error', optimizer='adam')  # compile the model
model.fit(x_train, y_train, nb_epoch=1000, batch_size=6)  # train the model for 1000 epochs
model.save_weights(modelfile)  # save the model weights

#4 Predict and de-normalize the results.
x = ((data[feature] - data_mean[feature]) / data_std[feature]).as_matrix()
data[u'L1_pred'] = model.predict(x) * data_std['L1'] + data_mean['L1']

#5 Export the results
data.to_excel(outputfile)

#6 Plot the predictions
import matplotlib.pyplot as plt
Example #21
    with open('bfvy_resnet.pkl', 'rb') as f:
        y_val2 = pickle.load(f)
    # X_train2 = datax['features']
    # y_train2 = datax['labels']
    # X_val2 = datay['features']
    # y_val2 = datay['labels']

    return X_train2, y_train2, X_val2, y_val2

X_train, y_train, X_val, y_val = load_bottleneck_data('resnet_train_bottleneck.json',
                                                      'resnet_validate_bottleneck.json')

input_shape = X_train.shape[1:]
inp = Input(shape=input_shape)
x = Flatten()(inp)
x = Dense(num_classes, activation='softmax')(x)
model = Model(inp, x)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy',f1])
with tf.Session() as sess:
    # fetch session so Keras API can work
    K.set_session(sess)
    K.set_learning_phase(1)
    history =model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
                       validation_data=(X_val, y_val), shuffle=True, verbose=1 )
    model.save_weights('resnet_bottleneck_weights.h5')
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
Example #22
print(df.head())
'''

dataset = df.values
X = dataset[:, 0:60]
Y_obj = dataset[:, 60]

e = LabelEncoder()
e.fit(Y_obj)
Y = e.transform(Y_obj)

# split the data into a training set and a test set
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.3,
                                                    random_state=seed)

model = Sequential()
model.add(Dense(24, input_dim=60, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train, Y_train, epochs=130, batch_size=5)

# apply the model to the test set
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
Example #23
 def get_dense(nb):
     return [
         Dense(nb),
         batch_norm(),
         LeakyReLU(0.2),
     ]
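 # Sketch of how the returned layer list is typically consumed (assuming a
 # sequential() composition helper like the one used in Example #10 is
 # available in this codebase):
 #     x = sequential(get_dense(256) + get_dense(256))(x)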
Example #24
def run(df,fname):
    # parameters
    epsilon_0 = .001
    num_actions = 3 
    epoch = 5000
    max_memory = 100
    
    batch_size = 500
    lkbk = 3
    START_IDX = 300

    env = Game(df, lkbk=lkbk, max_game_len=1000,init_idx=START_IDX,run_mode='sequential')
    hidden_size = num_actions*len(env.state)*2
    model = Sequential()
    model.add(Dense(hidden_size, input_shape=(len(env.state),), activation='relu'))
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dense(num_actions))
    model.compile(SGD(lr=.05), "mse")

    # If you want to continue training from a previous model, just uncomment the line below
    # model.load_weights("indicator_model.h5")

    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)

    # Train
    win_cnt = 0
    loss_cnt = 0
    wins = []
    losses = []
    pnls = []
    for e in range(epoch):
        action = 0
        # epsilon = epsilon_0**(np.log10(e))
        epsilon = 0.4
        env = Game(df, lkbk=lkbk, max_game_len=1000,init_idx=env.curr_idx,run_mode='sequential')
        loss = 0.
        env.reset()
        game_over = False
        # get initial input
        input_t = env.observe()

        cnt = 0
        while not game_over:
            print(cnt)
            cnt += 1
            input_tm1 = input_t
            # get next action

            if env.position:
                print('***Time Exit***')
                action = exit_action

            elif np.random.rand() <= epsilon:
                action = np.random.randint(0, num_actions, size=1)[0]
                if env.position == 0:
                    if action != 0:
                        print('***random entry***')
                        input_state_start = deepcopy(input_tm1)
                        action_start = action
                    if action == 2:
                        exit_action = 1
                    elif action == 1:
                        exit_action = 2
                    
            elif env.position == 0:
                q = model.predict(input_tm1)
                action = np.argmax(q[0])
                if action:
                    input_state_start = deepcopy(input_tm1)
                    action_start = action
                    exit_action = np.argmin(q[0][1:])+1
				

            # apply action, get rewards and new state
            input_t, reward, game_over = env.act(action)
            if reward > 0:
                win_cnt += 1
            elif reward < 0:
                loss_cnt += 1

            # store experience
            # if action or len(exp_replay.memory)<20 or np.random.rand() < 0.1:
                # exp_replay.remember([input_tm1, action, reward, input_t], game_over)

            # inputs, targets = exp_replay.get_batch(model, batch_size=batch_size)
            env.pnl_sum = sum(pnls)

            # zz = model.train_on_batch(inputs, targets)
            # loss += zz
        exp_replay.remember(input_state_start, action_start, reward)
        inputs, targets = exp_replay.get_batch(model, batch_size=batch_size)
        loss = model.train_on_batch(inputs, targets)
        prt_str = ("Epoch {:03d} | Loss {:.2f} | pos {} | len {} | sum pnl {:.2f}% @ {:.2f}% | eps {:,.4f} | {} | entry price {} | current price {}".format(e, 
                                                                                      loss, 
                                                                                      env.position, 
                                                                                      env.trade_len,
                                                                                      sum(pnls)*100,
                                                                                      env.pnl*100,
                                                                                      epsilon,
                                                                                      env.curr_time, env.entry, env.curr_price))
        print(prt_str)

        fid = open(fname,'a')
        fid.write(prt_str+'\n')
        fid.close()
        pnls.append(env.pnl)
        if not e%10:
            print('----saving weights-----')
            model.save_weights("indicator_model.h5", overwrite=True)
Example #25
pn['sent'] = pn['words'].apply(get_sent)  # quite slow

maxlen = 50

print("Pad sequences (samples x time)")
pn['sent'] = list(sequence.pad_sequences(pn['sent'], maxlen=maxlen))

x = np.array(list(pn['sent']))[::2]  # training set
y = np.array(list(pn['mark']))[::2]
xt = np.array(list(pn['sent']))[1::2]  # test set
yt = np.array(list(pn['mark']))[1::2]
xa = np.array(list(pn['sent']))  # full data set
ya = np.array(list(pn['mark']))

print('Build model...')
model = Sequential()
model.add(Embedding(len(dict) + 1, 256))
model.add(LSTM(256, 128))  # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(128, 1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              class_mode="binary")

model.fit(xa, ya, batch_size=16, nb_epoch=10)  # training takes several hours

classes = model.predict_classes(xa)
acc = np_utils.accuracy(classes, ya)
print('Test accuracy:', acc)
Example #26
# one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

model = Sequential()

model.add(Convolution2D(25, (3, 3), input_shape=(28, 28, 1)))
model.add(MaxPool2D(2, 2))

model.add(Convolution2D(50, (3, 3)))
model.add(MaxPool2D(2, 2))

model.add(Flatten())

# first hidden layer
model.add(Dense(units=512, kernel_initializer='he_normal', activation='relu'))
model.add(Dropout(0.2))  # dropout to prevent overfitting
# second hidden layer
model.add(Dense(units=512, kernel_initializer='he_normal', activation='relu'))
model.add(Dropout(0.2))  # dropout to prevent overfitting

model.add(Dense(units=10, activation='softmax'))  # output layer

# training
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          batch_size=64,
          epochs=20,
Example #27
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import Adadelta

input_shape = (3, 32, 32)
nb_classes = 10

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=Adadelta(),
              metrics=['accuracy'])
Example #28
numOfPrevSteps = trainX.shape[1]
featurelen = trainX.shape[2]

plt.plot(X_t)
plt.plot(Y_t)
plt.show()

from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers import Dense, LSTM, SimpleRNN, GRU, Dropout

print('Building model...')

model = Sequential()
model.add(LSTM(50, batch_input_shape=trainX.shape, stateful=True))
model.add(Dense(featurelen))
model.add(Activation('softmax'))
model.compile(loss='mean_squared_error', optimizer='adam')
model.reset_states()

print('starting training')
num_epochs = 100
for e in range(num_epochs):
    print('epoch - ', e + 1)
    for i in range(trainX.shape[0] - 1):
        model.train_on_batch(
            trainX[i:i + 1, :, :], trainY[i:i + 1, :]
        )  # Train on guessing a single element based on the previous element
    model.reset_states()
    for i in range(100):
        pred = model.predict(np.array([[[1]]]))
Example #29
with open(MODEL_LABELS_FILENAME, "wb") as f:
    pickle.dump(lb, f)

# Build the neural network!
model = Sequential()

# First convolutional layer with max pooling
model.add(Conv2D(20, (5, 5), padding="same", input_shape=(20, 20, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))

# Output layer with 32 nodes (one for each possible letter/number we predict)
model.add(Dense(32, activation="softmax"))

# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

# Train the neural network
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=32, epochs=1, verbose=1)

# Save the trained model to disk
model.save(MODEL_FILENAME)
open('completed'+str(time.time())+'.txt', 'a').close()
Example #30
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))

# Layer 4 - Convolution-Convolution-Normalizing-MaxPooling - Dropout 0.5
model.add(Conv2D(256, (3,3), padding = "valid", activation = 'relu'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(256, (3,3), padding = "valid", activation = 'relu'))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))

# Layer 5 - Flattening
model.add(Flatten())

# Layer 6 - FCC Input Layer - Dropout 0.5
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))

# Layer 7 - FCC Output Layer - Activation Softmax
model.add(Dense(2))
model.add(Activation("softmax"))

# Compiling
model.compile(optimizer = currOptimizer, loss = "binary_crossentropy", metrics = ["accuracy"])

# Model Training
trainedModel = model.fit_generator(data_augemntation.flow(X_train, Y_train, batch_size = numBatches),
                        validation_data = (X_test, Y_test),
                        steps_per_epoch = len(X_train) // numBatches,
Example #31
def construct_mlp(saved_weights, modelStructure=None, dropOuts=None, nFeatures=nFeat):
    # construct MLP and set the layer weights to the saved weights

    if modelStructure is None:
        modelStructure = [500, 100, 20]

    if dropOuts is None:
        dropOuts = [0.8, 0.5, 0.5]

    model = Sequential()

    hiddenLayer1 = Dense(output_dim=500, activation='relu', input_dim=nFeat, trainable=False)
    #dropOut1 = Dropout(p=0.8)
    model.add(hiddenLayer1)
    hiddenLayer1.set_weights(saved_weights[0])

    hiddenLayer2 = Dense(output_dim=100, activation='relu', input_dim=500, trainable=False)
    #dropOut2 = Dropout(p=0.5)
    model.add(hiddenLayer2)
    hiddenLayer2.set_weights(saved_weights[1])

    hiddenLayer3 = Dense(output_dim=20, activation='relu', input_dim=100, trainable=False)
    #dropOut3 = Dropout(p=0.5)
    model.add(hiddenLayer3)
    hiddenLayer3.set_weights(saved_weights[2])

    outputLayer = Dense(1, input_dim=20, trainable=False)
    model.add(outputLayer)
    outputLayer.set_weights(saved_weights[3])

    adam = Adam(lr=0.00001)
    model.compile(loss='mse', optimizer=adam)
    return model
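# Hypothetical usage (names assumed for illustration): saved_weights is expected
# to hold one layer.get_weights() result per layer, e.g. as collected by the
# layer-wise pretraining loop in Example #9:
#     mlp = construct_mlp(saved_weights=pretrained_weights)
#     predictions = mlp.predict(X_test)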
Example #32
    for word in words:
        if word in word2index:
            seqs.append(word2index[word])
        else:
            seqs.append(word2index["UNK"])
    X[i] = seqs
    y[i] = int(label)
    i += 1
ftrain.close()

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)


EMBEDDING_SIZE = 128
HIDDEN_LAYER_SIZE = 64
BATCH_SIZE = 32
NUM_EPOCHS = 10
model = Sequential()
model.add(Embedding(vocab_size, output_dim=EMBEDDING_SIZE, input_length=MAX_SENTENCE_LENGTH))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(HIDDEN_LAYER_SIZE, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.summary()

model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(Xtrain, ytrain, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS, validation_data=(Xtest, ytest))

score, acc = model.evaluate(Xtest, ytest, batch_size=BATCH_SIZE)
print("Test score: %.3f, accuracy: %.3f" % (score, acc))
import theano

import theano.tensor as T
import matplotlib.pyplot  as plt
import pickle
import numpy as np
from keras import backend as K
import datetime

from keras.datasets import mnist
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.regularizers import l2
from keras.utils import np_utils

model = Sequential()
model.add(Convolution2D(200, 2, 2, border_mode='same', input_shape=(1,20,20)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
d = Dense(1500, W_regularizer=l2(1e-3), activation='relu')
model.add(d)
model.add(Dropout(0.5))
model.add(Dense(1))
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
c = theano.function([d.get_input(train=False)], d.get_output(train=False))
o = c(np.random.random((1,20000)).astype('float32'))
print(d.input_shape)
Example #34
# X_train /= 255
# X_test /= 255


model = VGG_16("./weights/vgg16_weights.h5")
# model = convnet('VGG_16',weights_path="./weights/vgg16_weights.h5", heatmap=False)

pop_layer(model)

print len(model.layers)
for layer in model.layers:
  layer.trainable = False


layer_last = Dense(13, activation='softmax')
layer_last.trainable = True

model.add(layer_last)

sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])


if not os.path.exists("./temporal_weights"):
  os.makedirs("./temporal_weights")
checkpointer = ModelCheckpoint(filepath="./temporal_weights/weights_finetunned_vgg_terrassa.hdf5", verbose=1,
                               save_best_only=True)

if not data_augmentation:
  print('Not using data augmentation.')
Example #35
	def __init__(self, output_dim, hidden_dim,output_length, init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
                 weights=None, truncate_gradient=-1,
                 input_dim=None, input_length=None, hidden_state=None, batch_size=None, depth=2, context_sensitive=False,
                 ):

		if not type(depth) == list:
			depth = [depth, depth]
		n_lstms = sum(depth)
		if  depth[1] < 2 and context_sensitive:
			print "Warning: Your model will not be able to remember its previous output!"
		if weights is None:
			weights = [None] * (n_lstms + 1)

		if hidden_state is None:
			hidden_state = [None] * (n_lstms + 1)

		encoder_index = depth[0] - 1
		decoder_index = depth[0] + 1

		decoder = LSTMDecoder2(dim=output_dim, hidden_dim=hidden_dim, output_length=output_length,
							  init=init,inner_init=inner_init, activation=activation, 
							  inner_activation=inner_activation,weights=weights[decoder_index],
							  truncate_gradient = truncate_gradient, 
							  hidden_state=hidden_state[decoder_index], batch_size=batch_size, remember_state=context_sensitive)

		encoder = LSTMEncoder(input_dim=input_dim, output_dim=hidden_dim,init=init,
							  inner_init=inner_init, activation=activation, 
							  inner_activation=inner_activation,weights=weights[encoder_index],
							  truncate_gradient = truncate_gradient, input_length=input_length,
							  hidden_state=hidden_state[encoder_index], batch_size=batch_size, remember_state=context_sensitive)

		left_deep = [LSTMEncoder(input_dim=input_dim, output_dim=input_dim,init=init,
							  inner_init=inner_init, activation=activation, 
							  inner_activation=inner_activation,weights=weights[i],
							  truncate_gradient = truncate_gradient, input_length=input_length,
							  hidden_state=hidden_state[i], batch_size=batch_size, return_sequences=True, remember_state=context_sensitive)
					for i in range(depth[0]-1)]


		right_deep = [LSTMEncoder(input_dim=output_dim, output_dim=output_dim,init=init,
							  inner_init=inner_init, activation=activation, 
							  inner_activation=inner_activation,weights=weights[decoder_index + 1 + i],
							  truncate_gradient = truncate_gradient, input_length=input_length,
							  hidden_state=hidden_state[decoder_index + 1 + i], batch_size=batch_size, return_sequences=True, remember_state=context_sensitive)
					for i in range(depth[1]-1)]

		dense = Dense(input_dim=hidden_dim, output_dim=output_dim)
		encoder.broadcast_state(decoder)
		if weights[depth[0]] is not None:
			dense.set_weights(weights[depth[0]])
		super(Seq2seq, self).__init__()
		for l in left_deep:
			self.add(l)
		self.add(encoder)
		self.add(dense)
		self.add(decoder)
		for l in right_deep:
			self.add(l)
		self.encoder = encoder
		self.dense = dense
		self.decoder = decoder
		self.left_deep = left_deep
		self.right_deep = right_deep
Example #36
    print(
        "------------------------------------------------------\nSetting up color ANN...\n"
    )

    model_color = Sequential()
    model_color.add(
        Conv2D(1,
               filter_size,
               padding='same',
               activation='relu',
               input_shape=(image_size[0], image_size[1], 3)))
    model_color.add(BatchNormalization())
    model_color.add(MaxPooling2D(pool_size=2))
    model_color.add(Flatten())
    model_color.add(Dense(number_dense_color, activation='relu'))
    model_color.add(BatchNormalization())
    model_color.add(Dense(color_output_size, activation='softmax'))

    print(
        "------------------------------------------------------\nTraining color ANN...\n"
    )
    plot_model(model_color,
               to_file='models/color/color_ANN_model.png',
               show_shapes=True,
               show_layer_names=False)

    coloroptimizer = SGD(lr=lr_inicial_color,
                         decay=lr_inicial_color / Epochs_color)
    model_color.compile(loss="categorical_crossentropy",
                        optimizer=coloroptimizer,
Example #37
class DeepLSTM(Recurrent):
    '''Seq2Seq Deep Long-Short Term Memory unit.
    Inspired by Sutskever et al., 2014

    This layer outputs ALL the states and cells like [h_0, c_0, ..., h_deeper, c_deeper].
    If you need only the very last states, use a Lambda layer to narrow
    output[:, -2*output_dim:-output_dim]

    Args: similar to regular LSTM
        depth: number of LSTMs to stack
        readout: int, whether to add a final Dense layer on top; readout is
        that Dense's output_dim
    '''
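    # Sketch of the narrowing the docstring mentions (illustrative only, assuming
    # a Keras Lambda layer and an `output_dim` equal to this layer's output_dim):
    #     from keras.layers.core import Lambda
    #     last_h = Lambda(lambda x: x[:, -2 * output_dim:-output_dim],
    #                     output_shape=(output_dim,))(deep_lstm_output)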
    def __init__(self, output_dim, depth=1, readout=False, dropout=.5,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid', **kwargs):
        self.output_dim = output_dim
        self.depth = depth
        self.readout = readout
        self.dropout = dropout
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self._kwargs = kwargs
        super(DeepLSTM, self).__init__(**kwargs)

    def build(self):
        self.lstms = []
        for i in range(self.depth):
            if i == 0:
                self.lstms.append(LSTM(self.output_dim, self.init, self.inner_init,
                                  self.forget_bias_init, self.activation,
                                  self.inner_activation, **self._kwargs))
            else:
                self._kwargs['input_dim'] = self.output_dim
                self.lstms.append(LSTM(self.output_dim, self.init, self.inner_init,
                                  self.forget_bias_init, self.activation,
                                  self.inner_activation, **self._kwargs))

        [lstm.build() for lstm in self.lstms]

        # Get a flat list of trainable_weights
        self.trainable_weights = [weights for lstm in self.lstms for weights in
                                  lstm.trainable_weights]

        if self.readout:
            self.readout_layer = Dense(self.readout, input_dim=self.output_dim,
                                       activation='softmax')
            self.readout_layer.build()
            self.trainable_weights.extend(self.readout_layer.trainable_weights)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

        if self.stateful is not None:
            self.states = []
            for lstm in self.lstms:
                self.states.extend(lstm.states)
        else:
            self.states = [None, None] * self.depth

    def reset_states(self):
        [lstm.reset_states() for lstm in self.lstms]

    def get_initial_states(self, X):
        states = super(DeepLSTM, self).get_initial_states(X)
        if self.readout:
            initial_state = K.zeros_like(X)  # (samples, timesteps, input_dim)
            initial_state = K.sum(initial_state, axis=1)  # (samples, input_dim)
            reducer = K.zeros((self.input_dim, self.readout))
            initial_state = K.dot(initial_state, reducer)  # (samples, output_dim)
            states += [initial_state]
        return states

    def step(self, x, states):
        if self.readout:
            assert len(states) == 2*self.depth+1
            states = states[:-1]
        else:
            assert len(states) == 2*self.depth

        h = []
        # P = Print('[debug] X value: ', attrs=("shape",))
        for i, (h_tm1, c_tm1) in enumerate(zip(states[:-1:2], states[1::2])):
            # x = P(x)
            x, new_states = self.lstms[i].step(x, [h_tm1, c_tm1])
            h.extend(new_states)
            # x = K.dropout(x, self.dropout)  # no dropout on the first layer inputs

        if self.readout:
            h += [self.readout_layer(h[-2])]

        return K.concatenate(h, axis=-1), h

    def dream(self, length=140):
        def _dream_step(x, states):
            # input + states
            assert len(states) == 2*self.depth + 1
            x = states[-1]
            x = K.switch(K.equal(x, K.max(x, axis=-1,
                                          keepdims=True)), 1., 0.)
            states = states[:-1]

            h = []
            for i, (h_tm1, c_tm1) in enumerate(zip(states[:-1:2], states[1::2])):
                x, new_states = self.lstms[i].step(x, [h_tm1, c_tm1])
                h.extend(new_states)

            if self.readout:
                h += [self.readout_layer(h[-2])]
                final = h[-1]
            else:
                h += [h[-2]]
                final = h[-2]

            return final, h

        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # Only the very first time point of the input is used; the others only
        # serve to count the length of the output sequence
        X = self.get_input(train=False)
        mask = self.get_input_mask(train=False)

        assert K.ndim(X) == 3
        if K._BACKEND == 'tensorflow':
            if not self.input_shape[1]:
                raise Exception('When using TensorFlow, you should define ' +
                                'explicitly the number of timesteps of ' +
                                'your sequences.\n' +
                                'If your first layer is an Embedding, ' +
                                'make sure to pass it an "input_length" ' +
                                'argument. Otherwise, make sure ' +
                                'the first layer has ' +
                                'an "input_shape" or "batch_input_shape" ' +
                                'argument, including the time axis.')
        # if self.stateful:
        #     initial_states = self.states
        # else:
        #     initial_states = self.get_initial_states(X)

        s = self.get_output(train=False)[:, -1]
        idx = [0, ] + list(np.cumsum([self.output_dim]*2*self.depth +
                                     [self.readout, ]))
        initial_states = [s[:, idx[i]:idx[i+1]] for i in range(len(idx)-1)]

        # if self.readout:
        #     initial_states.pop(-1)
        # initial_states.append(X[:, 0])

        last_output, outputs, states = K.rnn(_dream_step, K.zeros((1, length, 1)),
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask)
        if self.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.states[i], states[i]))

        return outputs

    def get_config(self):
        config = {"output_dim": self.output_dim,
                  "depth": self.depth,
                  "readout": self.readout,
                  "dropout": self.dropout,
                  "init": self.init.__name__,
                  "inner_init": self.inner_init.__name__,
                  "forget_bias_init": self.forget_bias_init.__name__,
                  "activation": self.activation.__name__,
                  "inner_activation": self.inner_activation.__name__}
        base_config = super(DeepLSTM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @property
    def output_shape(self):
        input_shape = self.input_shape
        if self.readout:
            return input_shape[:2] + (self.output_dim*2*self.depth +
                                      self.readout,)
        else:
            return input_shape[:2] + (self.output_dim*2*self.depth,)
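The docstring above suggests slicing DeepLSTM's concatenated output with a Lambda layer when only the deepest hidden state is needed. A minimal, hedged usage sketch (old Keras 0.x/1.x Sequential API; the sizes are hypothetical and not part of the original example):

# Sketch only: DeepLSTM emits [h_0, c_0, ..., h_deeper, c_deeper] along the
# last axis, so we keep only h of the deepest LSTM, as the docstring recommends.
from keras.models import Sequential
from keras.layers.core import Lambda, Dense

output_dim, depth = 128, 3

model = Sequential()
model.add(DeepLSTM(output_dim, depth=depth, input_dim=64, input_length=20))
model.add(Lambda(lambda s: s[:, -2 * output_dim:-output_dim],
                 output_shape=(output_dim,)))
model.add(Dense(10, activation='softmax'))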
Example #38
0
def dense_bn(n):
    return [Dense(n), batch_norm(mode=1), Activation('relu')]
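A hedged sketch of composing such layer-list helpers into a model (batch_norm is assumed to be the project's BatchNormalization wrapper; the input dimension is hypothetical):

# Sketch only: flatten two dense_bn() blocks into a Sequential model.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(256, input_dim=784))        # hypothetical input size
for layer in dense_bn(128) + dense_bn(64):  # each call yields [Dense, BN, ReLU]
    model.add(layer)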
Example #39
0
def lstm_model(data,
               hidden_layer_neurons,
               epochs,
               feature_dimensions=1,
               verbose=False):
    """Build an LSTM model.

    Args:
        data: Data frame of X, Y values
        hidden_layer_neurons: Number of neurons per layers
        epochs: Number of iterations for learning
        feature_dimensions: Dimension of features (Number of rows per feature)

    Returns:
        model: Graph of LSTM model

    """
    # Initialize key variables
    start = time.time()
    """
    In a stateful LSTM network, you should only pass inputs whose number of
    samples is divisible by the batch size. Hence we use 1, which divides any
    possible number of samples.
    """
    batch_size = 1

    # Process the data for fitting
    x_values, y_values = data[:, 0:-1], data[:, -1]
    x_shaped = x_values.reshape(x_values.shape[0], 1, x_values.shape[1])

    # Let's do some learning!
    model = Sequential()
    """
    The Long Short-Term Memory network (LSTM) is a type of Recurrent Neural
    Network (RNN).

    A benefit of this type of network is that it can learn and remember over
    long sequences and does not rely on a pre-specified window lagged
    observation as input.

    In Keras, this is referred to as being "stateful", and involves setting the
    "stateful" argument to "True" when defining an LSTM layer.

    By default, an LSTM layer in Keras maintains state between data within
    one batch. A batch of data is a fixed-sized number of rows from the
    training dataset that defines how many patterns (sequences) to process
    before updating the weights of the network.

    A state is:
        Where am I now inside a sequence? Which time step is it? How is this
        particular sequence behaving since its beginning up to now?

    A weight is: What do I know about the general behavior of all sequences
        I've seen so far?

    State in the LSTM layer is cleared between batches by default. This is
    undesirable, so we make the LSTM stateful. This gives us fine-grained
    control over when the LSTM layer's state is cleared, by calling
    reset_states() explicitly between calls to model.fit().

    LSTM networks can be stacked in Keras in the same way that other layer
    types can be stacked. One addition to the configuration that is required
    is that an LSTM layer prior to each subsequent LSTM layer must return the
    sequence. This can be done by setting the return_sequences parameter on
    the layer to True.

    batch_size denotes the subset of the training samples (e.g. 100 out of
    1000) used to train the network in a single step. Batches are processed
    in succession, each one starting from the weights updated by the
    application of the previous batch.

    return_sequences indicates whether a recurrent layer should return its
    entire output sequence (i.e. a sequence of vectors of a specific
    dimension) to the next layer, or only its last output, which is a single
    vector of that dimension. It must be True for any recurrent layer that
    feeds another recurrent layer.

    batch_input_shape fixes the batch size that the network accepts,
    preventing inputs with a variable batch dimension. It is commonly
    required for stateful and stacked LSTM networks. It is a tuple of
    (batch_size, timesteps, data_dimension).
    """
    timesteps = x_shaped.shape[1]
    data_dimension = x_shaped.shape[2]

    # Add layers to the model
    model.add(
        LSTM(units=hidden_layer_neurons,
             batch_input_shape=(batch_size, timesteps, data_dimension),
             return_sequences=True,
             stateful=True))
    model.add(Dropout(0.2))

    model.add(
        LSTM(units=hidden_layer_neurons,
             batch_input_shape=(batch_size, timesteps, data_dimension),
             return_sequences=False,
             stateful=True))
    model.add(Dropout(0.2))

    model.add(Dense(units=feature_dimensions))
    # model.add(Activation('linear'))
    """
    Once the network is specified, it must be compiled into an efficient
    symbolic representation using a backend mathematical library,
    such as TensorFlow.

    In compiling the network, we must specify a loss function and optimization
    algorithm. We will use "mean_squared_error" or "mse" as the loss function,
    as it closely matches the RMSE we are ultimately interested in, and the
    efficient ADAM optimization algorithm.
    """
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    """
    Once the model is compiled, the network can be fit to the training data.
    Because the network is stateful, we must control when the internal state
    is reset. Therefore, we must manually manage the training process one epoch
    at a time across the desired number of epochs.

    By default, the samples within an epoch are shuffled prior to being exposed
    to the network. Again, this is undesirable for the LSTM because we want the
    network to build up state as it learns across the sequence of observations.
    We can disable the shuffling of samples by setting "shuffle" to "False".
    """
    for _ in range(epochs):
        model.fit(x_shaped,
                  y_values,
                  batch_size=batch_size,
                  shuffle=False,
                  epochs=1,
                  verbose=verbose,
                  validation_split=0.05)
        """
        When the fit process reaches the total length of the samples,
        model.reset_states() is called to reset the internal state at the end
        of the training epoch, ready for the next training iteration.

        This iteration will start training from the beginning of the dataset
        therefore state will need to be reset as the previous state would only
        be relevant to the prior epoch iteration.
        """
        model.reset_states()

    print('\n> Training Time: {:20.2f}'.format(time.time() - start))
    return model
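The comments above stress resetting state between training epochs; the same care applies at prediction time. A hedged sketch of using the returned stateful model (random data stands in for a real frame; batch_size stays at 1, matching the function):

# Sketch only: train on a dummy array, then predict with the stateful model.
import numpy as np

data = np.random.rand(100, 5)        # hypothetical (samples, features + target)
model = lstm_model(data, hidden_layer_neurons=32, epochs=10)

x_values = data[:, 0:-1]
x_shaped = x_values.reshape(x_values.shape[0], 1, x_values.shape[1])

model.reset_states()                 # start predictions from a clean state
predictions = model.predict(x_shaped, batch_size=1)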
Example #40
0
    def build(width, height, depth, classes):
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        model.add(
            Conv2D(32, (3, 3),
                   padding="same",
                   kernel_initializer="he_normal",
                   input_shape=inputShape))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(
            Conv2D(32, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(
            Conv2D(64, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(
            Conv2D(64, (3, 3), kernel_initializer="he_normal", padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(
            Conv2D(128, (3, 3), kernel_initializer="he_normal",
                   padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(
            Conv2D(128, (3, 3), kernel_initializer="he_normal",
                   padding="same"))
        model.add(ELU())
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(64, kernel_initializer="he_normal"))
        model.add(ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(64, kernel_initializer="he_normal"))
        model.add(ELU())
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(classes, kernel_initializer="he_normal"))
        model.add(Activation("softmax"))

        return model
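A hedged usage sketch for the build() method above (image size, class count, and compile settings are assumptions, not part of the original; in the original code build() likely lives on a model class and would be called through it):

# Sketch only: construct and compile the CNN defined above.
model = build(width=64, height=64, depth=3, classes=7)
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
model.summary()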
Example #41
0
def tag3d_network_dense(input,
                        nb_units=64,
                        nb_dense_units=[512, 512],
                        depth=2,
                        nb_output_channels=1,
                        trainable=True):
    n = nb_units

    def conv(n, repeats=None):
        def normal(shape, name=None):
            return keras.initializations.normal(shape, scale=0.01, name=name)

        if repeats is None:
            repeats = depth
        return [[
            Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
            Activation('relu')
        ] for _ in range(repeats)]

    base = sequential(
        [
            [
                Dense(nb_dense, activation='relu')
                for nb_dense in nb_dense_units
            ],
            Dense(8 * n * 4 * 4),
            Activation('relu'),
            Reshape((
                8 * n,
                4,
                4,
            )),
            conv(8 * n),
            UpSampling2D(),  # 8x8
            conv(4 * n),
            UpSampling2D(),  # 16x16
            conv(2 * n),
        ],
        ns='tag3d_gen.base',
        trainable=trainable)(input)

    tag3d = sequential(
        [
            conv(2 * n),
            UpSampling2D(),  # 32x32
            conv(n),
            UpSampling2D(),  # 64x64
            conv(n, 1),
            Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
        ],
        ns='tag3d',
        trainable=trainable)(base)

    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ],
                           ns='depth_map',
                           trainable=trainable)(base)

    return name_tensor(tag3d, 'tag3d'), name_tensor(depth_map, 'depth_map')
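A hedged sketch of wiring the generator above into a Keras 1.x functional model (the latent input size is hypothetical; sequential and name_tensor are helpers from the surrounding codebase):

# Sketch only: feed an Input tensor through the generator and wrap it in a Model.
from keras.layers import Input
from keras.models import Model

z = Input(shape=(50,))               # hypothetical latent/label vector
tag3d, depth_map = tag3d_network_dense(z, nb_units=64, depth=2)
generator = Model(input=z, output=[tag3d, depth_map])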
Example #42
0
var_exp = [(i / np.sum(eigenvalues)) * 100 for i in eigenvalues]
cum_var_exp = np.cumsum(var_exp)
# print 'importance of each eigenvalue'
# print var_exp
# print 'importance of sum of eigenvalues'
# print cum_var_exp
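# (Hedged illustration, not part of the original script: cum_var_exp is
#  typically used to pick the smallest number of principal components that
#  explains, say, 95% of the variance.)
# n_components = int(np.argmax(cum_var_exp >= 95) + 1)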

accuracies_folds_withoutPCA = []

#withoutPCA
for train, test in kf.split(X_train.as_matrix()):
    kX_train2, kX_test2 = X_train.as_matrix()[train], X_train.as_matrix()[test]
    kY_train2, kY_test2 = Y_train[train], Y_train[test]
    model = Sequential()
    model.add(Dense(8, input_shape=(kX_train2.shape[1:])))
    #model.add(Dropout(0.2))

    #hidden layers
    # model.add(Dense(64, activation='relu')) #,W_constraint=maxnorm(1)
    # model.add(Dropout(0.2))
    model.add(Dense(7, activation='relu'))  #,W_constraint=maxnorm(1)
    # model.add(Dropout(0.5))

    #output layer
    model.add(Dense(2, activation='softmax'))
    # model.summary()

    # Compile model
    sgd = SGD(lr=learning_rate, momentum=0.7, nesterov=True)
    # adam=Adam(lr=learning_rate, beta_1=0.7, beta_2=0.999, epsilon=1e-08, decay=0.0000001)