Esempio n. 1
0
def build_part2_RNN(window_size, num_chars):
    """Character-level RNN: a 200-unit LSTM feeding a softmax over the vocabulary.

    window_size: number of timesteps per input window.
    num_chars: vocabulary size (both input feature count and output classes).
    """
    rnn = Sequential()
    rnn.add(LSTM(200, input_shape=(window_size, num_chars)))
    # A dropout layer was experimented with here and left disabled.
    rnn.add(Dense(num_chars, activation='softmax'))
    return rnn
    def create(self):
        """Build the multimodal (language + vision) encoder-decoder in-place.

        Encodes the question with a recurrent language model, embeds the
        visual input, merges the two branches, and decodes an output token
        sequence with a recurrent decoder plus per-timestep softmax.
        """
        # Language branch: zero-masked embedding -> stacked RNNs -> final
        # recurrent encoder that keeps only the last state.
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model

        # Visual branch: perception model chosen by config, followed by an
        # embedding sized to the perception model's output dimensionality.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model

        # Fuse the two branches; 'dot' mode needs explicit dot axes.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(Dropout(0.5))
        self.add(Dense(self._config.output_dim))

        # Decoder: repeat the fused vector once per output timestep, run a
        # sequence-returning recurrent decoder, then per-step softmax.
        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))
    def create(self):
        """Build the temporal-pooling multimodal classifier in-place.

        Like the recurrent variant, but the language branch collapses the
        token sequence via temporal pooling instead of an RNN encoder, and
        the fused representation feeds a deep MLP classifier head.
        """
        # Language branch: embedding -> temporal pooling -> DropMask (the
        # pooled output no longer aligns with the input mask).
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.temporal_pooling(language_model)
        language_model.add(DropMask())
        #language_model.add(BatchNormalization(mode=1))
        self.language_model = language_model

        # Visual branch: config-selected perception model plus embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model.add(BatchNormalization(mode=1))
        self.visual_model = visual_model

        # Fuse branches; 'dot' mode needs explicit dot axes.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        # Classifier head.
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Esempio n. 4
0
 def encoders_m(self, inputs):
     """Memory encoder m: embed *inputs* into 64-dim vectors with 30% dropout."""
     encoder = Sequential()
     encoder.add(Embedding(input_dim=self.vocab_size, output_dim=64))
     encoder.add(Dropout(0.3))
     return encoder(inputs)
    def create(self):
        """Build the multimodal model whose fused sequence feeds an RNN encoder.

        The visual features are concatenated with a zero sequence so the
        merged language+vision input forms one sequence that the recurrent
        encoder consumes before the deep-MLP classifier head.
        """
        # Language branch: zero-masked textual embedding only.
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model

        # Visual branch: config-selected perception model plus embedding.
        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        # Pad the visual features with an all-zero sequence so the
        # concatenation spans the input time steps.
        zero_model = Sequential()
        # BUG FIX: '- 1' was outside the RepeatVector(...) call, i.e. the
        # code tried to subtract 1 from a layer object (TypeError at build).
        zero_model.add(RepeatVector(self._config.max_input_time_steps - 1))
        # BUG FIX: Merge was indexed (Merge[...]) instead of called, and
        # mode='concat' was passed to add() instead of to Merge().
        visual_model.add(Merge([visual_model, zero_model], mode='concat'))
        self.visual_model = visual_model

        # Fuse branches; 'dot' mode needs explicit dot axes.
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        # Recurrent encoder over the fused sequence, then classifier head.
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Esempio n. 6
0
def define_model(lr, momentum):
    """Build and compile a small 2-class convnet for 1x100x100 inputs.

    lr, momentum: SGD hyper-parameters used at compile time.
    """
    model = Sequential()

    # Convolutional feature extractor: shrinking kernels, 2x2 max-pooling
    # after each of the first four convolutions.
    model.add(Convolution2D(8, 9, 9, activation="relu", input_shape=(1, 100, 100)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(16, 7, 7, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 5, 5, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(250, 3, 3, activation="relu"))

    # Dense classifier head.
    model.add(Flatten())
    model.add(Dense(125, activation="relu"))
    model.add(Dense(2, activation="softmax"))

    # Compile with plain SGD (learning rate, momentum, objective...).
    sgd = SGD(lr=lr, momentum=momentum)
    model.compile(loss="categorical_crossentropy", optimizer=sgd)

    return model
Esempio n. 7
0
def train_model():
    """Load the training matrix, split each row in half, and fit the model.

    Python 2 code (print statements, integer '/'). Each sample row is split
    into two equal halves to build the pairwise input [x1, x2].
    """
    # (X_train, Y_train, X_test, Y_test) = prapare_train()
    X_ = []
    with open('../data/train_matrix.out') as train_file:
        X_train = json.load(train_file)
        # Split every row into its first and second half.
        for x in X_train:
            a = len(x)
            print a/2
            x1 = x[:a/2]
            x2 = x[a/2:]
            x3 = []
            x3.append(x1)
            x3.append(x2)
            X_.append(x3)
    # X_test = pickle.load('../data/test_matrix.out')
    # Hard-coded labels: 9 values, three repetitions of [1, 0, 0].
    Y_train = [1,0,0]*3
    # Y_test = [1,0,0]*3
    # print len(X_train) - len(Y_train)
    # print len(X_test) - len(Y_test)
    model = Sequential()
    model = get_nn_model()  # NOTE(review): the Sequential above is discarded
    model.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    # model.fit(X_train, Y_train,
    #       batch_size=batch_size,
    #       nb_epoch=nb_epoch,
    #       validation_data=(X_test, Y_test))
#2
    # batch_size / nb_epoch come from the enclosing module scope.
    model.fit(X_, Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_split = 0.2)
    print 'ok'
Esempio n. 8
0
def getVggModel():
    """Pretrained VGG16 with a fine-tunable tail and an ELU regression head.

    Returns a functional Model mapping a 160x320x3 image to a single
    linear output (steering-style regression).

    NOTE(review): the original also built a Sequential with a Lambda
    normalization (x/255 - 0.5) and Cropping2D that was never connected to
    the returned graph; that dead code is removed here, so input
    normalization/cropping is NOT applied by this model.
    """
    input_image = Input(shape=(160, 320, 3))

    base_model = VGG16(input_tensor=input_image, include_top=False)

    # Freeze everything except the last three layers of the VGG base.
    for layer in base_model.layers[:-3]:
        layer.trainable = False

    # Regression head on top of block5_conv3, L2-regularized dense stages.
    x = base_model.get_layer("block5_conv3").output
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(4096, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(1, activation="linear")(x)
    return Model(input=input_image, output=x)
class QLearn:
    """Q-learning agent approximating Q(state, action) with a dense network.

    The network replaces the tabular dictionary a classic implementation
    would use: 4 state inputs -> 50 -> 20 -> 2 outputs (one per action).
    """

    def __init__(self, actions, epsilon, alpha, gamma):
        self.epsilon = epsilon  # exploration constant
        self.alpha = alpha      # discount constant
        self.gamma = gamma      # discount factor
        self.actions = actions

        # Q-value approximator; activations were experimented with and
        # left out, so the layers compose linearly.
        net = Sequential()
        net.add(Dense(50, init='lecun_uniform', input_shape=(4,)))
        net.add(Dense(20, init='lecun_uniform'))
        net.add(Dense(2, init='lecun_uniform'))

        # RMSprop was chosen after SGD diverged (per the original notes).
        net.compile(loss='mse', optimizer=RMSprop())
        self.network = net
        # Print a summary of the network
        self.network.summary()
Esempio n. 10
0
    def make_fc_model(self):
        '''
        creates a fully convolutional model from self.model
        '''
        # get index of first dense layer in model
        behead_ix = self._get_behead_index(self.model_layer_names)
        model_layers = self.model.layers[:behead_ix]
        # shape of image entering FC layers
        inp_shape = self.model.layers[behead_ix - 1].get_output_shape_at(-1)

        # replace dense layers with convolutions
        model = Sequential()
        model_layers += [Convolution2D(2048, 1, 1)]
        model_layers += [Activation('relu')]
        model_layers += [Convolution2D(2048, 1, 1)]
        model_layers += [Activation('relu')]
        model_layers += [Convolution2D(self.nb_classes, inp_shape[-1], inp_shape[-1])]
        # must be same shape as target vector (None, num_classes, 1)
        model_layers += [Reshape((self.nb_classes-1,1))]
        model_layers += [Activation('softmax')]

        print 'Compiling Fully Convolutional Model...'
        for process in model_layers:
            model.add(process)
        sgd = SGD(lr=self.lr_1, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        print 'Done.'
        return model
    def __init__(self, restore=None, session=None, Dropout=Dropout, num_labels=10):
        """Build a small 28x28x1 convnet ending in num_labels raw logits.

        restore: optional path to saved weights to load into the model.
        session: unused here; kept for interface compatibility.
        Dropout: dropout layer class, injectable (e.g. for ablations).
        num_labels: number of output classes.
        """
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = num_labels

        model = Sequential()

        nb_filters = 64
        # Three conv stages (strides 2, 2, 1), then a small dense head.
        layers = [Conv2D(nb_filters, (5, 5), strides=(2, 2), padding="same",
                         input_shape=(28, 28, 1)),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(2, 2), padding="valid"),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(1, 1), padding="valid"),
                  Activation('relu'),
                  Flatten(),
                  Dense(32),
                  Activation('relu'),
                  Dropout(.5),
                  Dense(num_labels)]

        for layer in layers:
            model.add(layer)

        # Idiom fix: compare with None by identity, not '!= None'.
        if restore is not None:
            model.load_weights(restore)

        self.model = model
Esempio n. 12
0
 def __init__(self):
     # Embedding-only network: 115227-word vocabulary, 50-dim vectors,
     # 75-token inputs, initialised from pretrained `pre_weights`
     # (module-level). Python 2 code (print statement).
     model = Sequential()
     model.add(Embedding(115227, 50, input_length=75, weights=pre_weights))
     # MCE: presumably a custom loss defined elsewhere -- TODO confirm.
     model.compile(loss=MCE, optimizer="adadelta")
     print "Build Network Completed..."
     self.model = model
     # word <-> index lookup tables, populated later.
     self.vocab = {"get_index":{}, "get_word":[]}
Esempio n. 13
0
def Simple(layers, func, ipt):
    """Stack dense layers of widths *layers* with matching activations *func*.

    ipt: input dimensionality of the first layer.
    """
    net = Sequential()
    # First layer carries the input dimension; the rest chain off it.
    net.add(Dense(layers[0], input_dim=ipt, activation=func[0]))
    for i, width in enumerate(layers[1:], start=1):
        net.add(Dense(width, activation=func[i]))
    return net
Esempio n. 14
0
File: test.py Progetto: aasensio/EST
class trainCNN(object):
    """Tiny regression demo: fit y = 1.2*x^2 + 0.5 with a 1-40-1 tanh MLP."""

    def __init__(self):
        # 200 random scalar inputs and their quadratic targets.
        self.X = np.random.randn(200,1)
        self.Y = 1.2*self.X**2 + 0.5

    def defineCNN(self):
        """Define the 1-40-1 tanh network and compile it (mse loss)."""
        print("Setting up network...")
        self.model = Sequential()
        self.model.add(Dense(40, input_shape=(1,)))
        self.model.add(Activation('tanh'))
        self.model.add(Dense(1))

        # NOTE(review): `sgd` is configured but the model compiles with the
        # string 'RMSprop', so these SGD settings are never used -- confirm
        # which optimizer was intended.
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='mse', optimizer='RMSprop')

    def trainCNN(self, nIterations):
        """Fit for nIterations epochs, 20% validation split, no shuffling."""
        print("Training network...")
        self.metrics = self.model.fit(self.X, self.Y, batch_size=20, nb_epoch=nIterations, validation_split=0.2, shuffle=False)
        # self.model.fit(self.XTrainSet, self.YTrainSet, batch_size=self.batchSize, nb_epoch=self.nbEpoch, validation_split=0.2)

    def testCNN(self):
        """Plot targets ('.') versus predictions ('x') for the training X."""
        train = self.model.predict(self.X)
        pl.plot(self.X, self.Y, '.')
        pl.plot(self.X, train, 'x')
Esempio n. 15
0
def test_dropout(layer_class):
    """Exercise dropout/recurrent_dropout behaviour of a recurrent layer.

    Checks that (a) the layer builds with dropout in both unroll modes,
    (b) the symbolic output depends on the learning phase unless
    training=True is forced, and (c) test-time predictions are
    deterministic. Uses module-level constants: units, num_samples,
    timesteps, embedding_dim.
    """
    for unroll in [True, False]:
        layer_test(layer_class,
                   kwargs={'units': units,
                           'dropout': 0.1,
                           'recurrent_dropout': 0.1,
                           'unroll': unroll},
                   input_shape=(num_samples, timesteps, embedding_dim))

        # Test that dropout is applied during training
        x = K.ones((num_samples, timesteps, embedding_dim))
        layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
                            input_shape=(timesteps, embedding_dim))
        y = layer(x)
        assert y._uses_learning_phase

        # With training forced on, the output no longer depends on the
        # learning-phase flag.
        y = layer(x, training=True)
        assert not getattr(y, '_uses_learning_phase')

        # Test that dropout is not applied during testing
        x = np.random.random((num_samples, timesteps, embedding_dim))
        layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
                            unroll=unroll,
                            input_shape=(timesteps, embedding_dim))
        model = Sequential([layer])
        assert model.uses_learning_phase
        y1 = model.predict(x)
        y2 = model.predict(x)
        assert_allclose(y1, y2)
Esempio n. 16
0
def test_recurrent_wrapper__simple_rnn__output_model():
    """The hidden state is not returned, but mapped by an output model.
    """
    def recurrent_layer():
        # One recurrence step: project the input, sum with the previous
        # hidden state, squash into the new hidden state, and emit a
        # 64-dim output derived from the pre-activation sum.
        hidden = Input((128,))
        input = Input((10,))

        x = Dense(128, activation='relu')(input)
        x = merge([hidden, x], mode='sum')
        new_hidden = Activation('sigmoid')(x)
        output = Dense(64)(x)

        # return_sequences=True: one 64-dim output per input timestep.
        return RecurrentWrapper(
            input=[input],
            output=[output],
            bind={hidden: new_hidden},
            return_sequences=True,
        )

    m = Sequential([
        InputLayer(input_shape=(None, 10)),
        recurrent_layer(),
    ])

    # (batch=30, time=20, features=10) in -> per-step 64-dim outputs.
    assert m.predict(np.random.uniform(size=(30, 20, 10))).shape == (30, 20, 64)
Esempio n. 17
0
def test_replace_with_mask_layer__no_mask():
    """Without an input mask, ReplaceWithMaskLayer should emit all ones."""
    model = Sequential([
        ReplaceWithMaskLayer(input_shape=(None, 10)),
    ])
    output = model.predict(np.random.uniform(size=(30, 20, 10)))
    np.testing.assert_allclose(output, np.ones((30, 20, 1)))
def build_siamese(input_model_1, input_model_2, input_dim, output_dim):
    """Join two branch models through one shared Dense layer, merged by 'sum'.

    :param input_model_1: first branch feeding the shared layer
    :type input_model_1: model
    :param input_model_2: second branch feeding the shared layer
    :type input_model_2: model
    :param input_dim: last layer input
    :type input_dim: int
    :param output_dim: last layer output
    :type output_dim: int
    :return: uncompiled Sequential wrapping the Siamese merge
    :rtype: Sequential
    """
    shared_layer = Dense(input_dim=input_dim, output_dim=output_dim)

    siamese_net = Sequential()
    # mode: one of {sum, mul, concat, ave, join, cos, dot}.
    siamese_net.add(Siamese(shared_layer, [input_model_1, input_model_2], 'sum'))

    # model.compile(loss='mse', optimizer='sgd')
    return siamese_net
Esempio n. 19
0
def create_mlp_network(input_dim):
    """Three-layer relu MLP whose first layer is sparse fully-connected."""
    net = Sequential()
    # A plain Dense first layer was tried and left disabled in the original.
    net.add(SparseFullyConnectedLayer(300, input_dim=input_dim, activation="relu"))
    net.add(Dense(300, activation="relu"))
    net.add(Dense(128, activation="relu"))
    return net
Esempio n. 20
0
 def build(self, layers):
     """Assemble *layers* into a Sequential and wrap it in TimeDistributed."""
     inner = Sequential()
     for layer in layers:
         inner.add(layer)
     # Apply the whole stack independently at every timestep.
     self.model = TimeDistributed(inner)
Esempio n. 21
0
def create_model(insert=None):
    '''Create the basic model.

    insert: optional dict with 'layers' (layers to splice into the stack)
    and 'insert_pos' (index at which they are inserted).
    '''

    model = Sequential()

    # Base stack: one conv block then a dense softmax head; sizes come
    # from module-level constants.
    layers = [Convolution2D(NB_FILTERS, NB_CONV, NB_CONV,
                            border_mode='valid',
                            input_shape=(1, IMGROWS, IMGCOLS)),
              Activation('relu'),
              MaxPooling2D(pool_size=(NB_POOL, NB_POOL)),
              Dropout(0.25),
              Flatten(),
              Dense(128),
              Activation('relu'),
              Dropout(0.5),
              Dense(NB_CLASSES),
              Activation('softmax')]

    if insert is not None:
        # NOTE(review): inserting every layer at the same index leaves the
        # spliced layers in reverse order -- confirm this is intended.
        for l in insert['layers']:
            layers.insert(insert['insert_pos'], l)

    for layer in layers:
        model.add(layer)

    return model
Esempio n. 22
0
def build_part1_RNN(window_size):
    """Univariate sequence regressor: 5-unit LSTM feeding one linear output.

    window_size: number of timesteps per input window (one feature each).
    """
    rnn = Sequential()
    rnn.add(LSTM(5, input_shape=(window_size, 1)))
    # A dropout layer was experimented with here and left disabled.
    rnn.add(Dense(1))
    return rnn
Esempio n. 23
0
 def encoders_c(self, inputs):
     """Memory encoder c: embed *inputs* to query_maxlen dims with 30% dropout."""
     encoder = Sequential()
     encoder.add(Embedding(input_dim=self.vocab_size,
                           output_dim=self.query_maxlen))
     encoder.add(Dropout(0.3))
     return encoder(inputs)
Esempio n. 24
0
def build_partial_cnn1(img_rows, img_cols):
    """First stage of a CNN: a single 2x2 convolution plus explicit ReLU.

    The remaining conv/pool/dense stages are kept below in a disabled
    string-literal block, apparently for experimentation.
    """
    model = Sequential()
    #model.add(Convolution2D(nb_filter=100, nb_row=5, nb_col=5,
    # activation='linear' here; the ReLU is added as a separate layer.
    model.add(Convolution2D(nb_filter=10, nb_row=2, nb_col=2,
                            init='glorot_uniform', activation='linear',
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))

    #model.add(MaxPooling2D(pool_size=(2, 2)))

    #model.add(Convolution2D(nb_filter=100, nb_row=5, nb_col=5,
    '''model.add(Convolution2D(nb_filter=512, nb_row=5, nb_col=5,
                            init='glorot_uniform', activation='linear',
                            border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))'''

    return model
Esempio n. 25
0
def create_model(kernel_regularizer=None, activity_regularizer=None):
    """Single-Dense classifier used to exercise regularizer handling.

    Relies on module-level num_classes and data_dim.
    """
    net = Sequential()
    head = Dense(num_classes,
                 kernel_regularizer=kernel_regularizer,
                 activity_regularizer=activity_regularizer,
                 input_shape=(data_dim,))
    net.add(head)
    return net
def get_item_subgraph(input_shape, latent_dim):
    """Map item features to a latent_dim embedding with one Dense layer.

    Could take item metadata here, do convolutional layers etc.
    """
    subgraph = Sequential()
    subgraph.add(Dense(latent_dim, input_shape=input_shape))
    return subgraph
Esempio n. 27
0
def fork(model, n=2):
    """Return *n* Sequential containers, each wrapping the same *model*."""
    branches = []
    for _ in range(n):
        wrapper = Sequential()
        wrapper.add(model)
        branches.append(wrapper)
    return branches
Esempio n. 28
0
 def conv2d_work(input_dim):
     # Build the configured stack of Conv2D + MaxPooling2D pairs.
     # NOTE(review): `self` is referenced but is not a parameter -- this only
     # works as a closure inside a method; `input_dim` is unused here.
     seq = Sequential()
     assert self.config['num_conv2d_layers'] > 0
     for i in range(self.config['num_conv2d_layers']):
         seq.add(Conv2D(filters=self.config['2d_kernel_counts'][i], kernel_size=self.config['2d_kernel_sizes'][i], padding='same', activation='relu'))
         seq.add(MaxPooling2D(pool_size=(self.config['2d_mpool_sizes'][i][0], self.config['2d_mpool_sizes'][i][1])))
     return seq
Esempio n. 29
0
	def question_encoder(self, dropout=0.3):
		# Embed question tokens (vocab_size -> 64 dims, input length
		# query_maxlen; both come from the enclosing module scope) followed
		# by dropout; the result is stored on self._question_encoder.
		question_encoder = Sequential()
		question_encoder.add(Embedding(input_dim=vocab_size,
                               output_dim=64,
                               input_length=query_maxlen))
		question_encoder.add(Dropout(dropout))
		self._question_encoder = question_encoder
Esempio n. 30
0
def test_recurrent_wrapper__simple_rnn__no_sequences():
    """Return only the latest step in the sequence
    """
    def recurrent_layer():
        # One recurrence step: project the input, sum with the previous
        # hidden state, and squash through a sigmoid to form the new state.
        hidden = Input((128,))
        input = Input((10,))

        x = Dense(128, activation='relu')(input)
        x = merge([hidden, x], mode='sum')
        new_hidden = Activation('sigmoid')(x)

        # return_sequences=False: only the final hidden state is emitted.
        return RecurrentWrapper(
            input=[input],
            output=[new_hidden],
            bind={hidden: new_hidden},
            return_sequences=False,
        )

    m = Sequential([
        InputLayer(input_shape=(None, 10)),
        recurrent_layer(),
    ])

    result = m.predict(np.random.uniform(size=(30, 20, 10)))

    # map into hidden state
    assert result.shape == (30, 128)
Esempio n. 31
0
    def __init__(self, config, data):
        # config: configuration mapping consumed by build_model;
        # data: dataset wrapper -- exact fields defined elsewhere, TODO confirm.
        self.config = config
        self.data = data
        self.model = Sequential()

        # Populate self.model immediately.
        self.build_model()
Esempio n. 32
0
class Model_Truong:
    """VGG-style convnet (after Truong et al.) on a plain Sequential.

    The commented-out MaxPooling2D / GlobalAveragePooling2D lines record
    disabled experimental variants.
    """

    def __init__(self, config, data):
        #super(Model_CUI_CNN3, self).__init__()
        # config must provide "input_shape"; data must provide nClasses.
        self.config = config
        self.data = data
        self.model = Sequential()

        self.build_model()

    def build_model(self):
        """Assemble the conv stack and dense classifier head in-place."""
        #after Truong et al.
        #input-layer is not added as a layer!
        self.model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   activation='relu',
                   input_shape=self.config["input_shape"]))
        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        #        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        #        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        #        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        #        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))

        #        self.model.add(GlobalAveragePooling2D())

        # Dense softmax head sized by the dataset's class count.
        self.model.add(Flatten())
        self.model.add(Dense(304, activation='relu'))
        self.model.add(Dense(self.data.nClasses, activation='softmax'))

        print('Model created successfully.')
Esempio n. 33
0
class SuperSimpleLSTMClassifierRandomEmbedding:
    """LSTM text classifier over a randomly initialised (trainable) embedding."""

    def __init__(self, max_seq_len, n_classes):
        # Fixed vocabulary size and embedding width.
        vocab_size = 17490
        embedding_dim = 1024

        net = Sequential()
        net.add(
            Embedding(vocab_size,
                      embedding_dim,
                      input_length=max_seq_len,
                      trainable=True))
        net.add(Dropout(0.5))
        net.add(LSTM(128))
        net.add(Dropout(0.5))
        net.add(Dense(n_classes, activation='sigmoid'))
        net.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])
        net.summary()
        self.model = net
Esempio n. 34
0
class Model_Sharma_addConv:
    """Convnet with doubling filter widths (8..128, then 264) and a large
    dense layer before the softmax classifier.

    Commented-out activity_regularizer fragments record disabled
    L2-regularization experiments.
    """

    def __init__(self, config, data):
        # config must provide "input_shape"; data must provide nClasses.
        self.config = config
        self.data = data
        self.model = Sequential()

        self.build_model()

    def build_model(self):
        """Assemble the conv stack and dense classifier head in-place."""
        #after Cui et al.
        #input-layer is not added as a layer!
        self.model.add(
            Conv2D(
                8,
                (3, 3),
                padding='same',
                activation='relu',
                #activity_regularizer=l2(0.001),
                input_shape=self.config["input_shape"]))
        self.model.add(
            Conv2D(16, (3, 3), padding='same',
                   activation='relu'))  #, activity_regularizer=l2(0.001)))
        self.model.add(
            Conv2D(32, (3, 3), padding='same',
                   activation='relu'))  #, activity_regularizer=l2(0.001)))
        self.model.add(
            Conv2D(64, (3, 3), padding='same',
                   activation='relu'))  #, activity_regularizer=l2(0.001)))
        self.model.add(
            Conv2D(128, (3, 3), padding='same',
                   activation='relu'))  #, activity_regularizer=l2(0.001)))
        self.model.add(Conv2D(264, (3, 3), padding='same', activation='relu'))
        self.model.add(Flatten())
        # Wide dense layer (8*8*128 = 8192 units) before classification.
        self.model.add(Dense(
            (8 * 8 * 128),
            activation='relu'))  #, activity_regularizer=l2(0.001)))
        #self.model.add(Dropout(0.5))
        #classification in 2 classes
        self.model.add(Dense(self.data.nClasses, activation='softmax'))

        print('Model created successfully.')
Esempio n. 35
0
    #----------------------- Data Normalization
    # Scale pixel values from [0, 255] into [0, 1].
    x_train = x_train.astype('float32')
    x_val = x_val.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_val /= 255
    x_test /= 255
    #--------------------- Checks---------------------------
    # The model below assumes channels-last image tensors.
    if K.image_data_format() != "channels_last":
        K.set_image_data_format("channels_last")


    # -----------------  MODEL  ----------------------
    input_shape = (img_size, img_size, 3)

    # Two conv blocks (32 then 64 filters), each conv->relu->conv->relu->
    # pool->dropout, then flatten for the dense head (which continues
    # beyond this excerpt).
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
Esempio n. 36
0
class BidirectionalLSTMClassifier:
    """Text classifier: pretrained embedding -> bidirectional LSTM -> sigmoids."""

    def __init__(self, embedding_matrix, max_seq_len, n_classes):
        # Vocabulary size and embedding width come from the matrix shape.
        vocab_size = embedding_matrix.shape[0]
        embedding_dim = embedding_matrix.shape[1]

        net = Sequential()
        net.add(
            Embedding(vocab_size,
                      embedding_dim,
                      weights=[embedding_matrix],
                      input_length=max_seq_len,
                      trainable=True))
        net.add(Dropout(0.5))
        net.add(Bidirectional(LSTM(128)))
        net.add(Dropout(0.5))
        net.add(Dense(n_classes, activation='sigmoid'))
        net.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])
        net.summary()
        self.model = net
Esempio n. 37
0
# Load precomputed input/target windows from the HDF5 dataset.
with h5py.File(''.join(['bitcoin2015to2017_close.h5']), 'r') as hf:
    datas = hf['inputs'].value
    labels = hf['outputs'].value




# Network / training hyper-parameters; window length and feature count
# are taken from the loaded data shapes.
step_size = datas.shape[1]
units= 50
second_units = 30
batch_size = 8
nb_features = datas.shape[2]
epochs = 100
output_size=16
output_file_name='bitcoin2015to2017_close_LSTM_1_tanh_leaky_'
#split training validation
# 80/20 chronological split; targets use feature column 0.
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:,0]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:,0]


#build model
# Single tanh LSTM regressor with heavy dropout and a LeakyReLU output
# stage; training logs to CSV and checkpoints every epoch on val_loss.
model = Sequential()
model.add(LSTM(units=units,activation='tanh', input_shape=(step_size,nb_features),return_sequences=False))
model.add(Dropout(0.8))
model.add(Dense(output_size))
model.add(LeakyReLU())
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True),ModelCheckpoint('weights/'+output_file_name+'-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
Esempio n. 38
0
class CNNRandomEmbedding:
    """1-D CNN text classifier over a randomly initialised embedding.

    Multi-label setup: one sigmoid output per class (k-hot targets).
    """

    # Running max / size of an intermediate representation; presumably
    # updated elsewhere in the class -- TODO confirm usage.
    rep_max = -100000.0
    rep_size = 0

    def __init__(self,
                 max_seq_len,
                 n_classes,
                 num_filters=64,
                 weight_decay=1e-4):
        # Fixed vocabulary and embedding width (random init, trainable).
        nb_words = 17490
        embed_dim = 1024
        self.model = Sequential()
        self.model.add(
            Embedding(nb_words,
                      embed_dim,
                      input_length=max_seq_len,
                      trainable=True))
        self.model.add(Dropout(0.25))
        # Two conv stages with width-7 kernels; global pooling collapses
        # the sequence before the L2-regularized dense head.
        self.model.add(
            Conv1D(num_filters, 7, activation='relu', padding='same'))
        self.model.add(MaxPooling1D(2))
        self.model.add(
            Conv1D(num_filters, 7, activation='relu', padding='same'))
        self.model.add(GlobalMaxPooling1D())
        self.model.add(Dropout(0.5))
        self.model.add(
            Dense(32,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(weight_decay)))
        self.model.add(Dense(
            n_classes, activation='sigmoid'))  #multi-label (k-hot encoding)

        adam = optimizers.Adam(lr=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               decay=0.0)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy'])
        self.model.summary()
    def build_discriminator(self):
        """GAN discriminator: flattened image -> 512 -> 256 -> sigmoid validity."""
        net = Sequential()
        net.add(Flatten(input_shape=self.img_shape))
        net.add(Dense(512))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dense(256))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dense(1, activation='sigmoid'))
        net.summary()

        # Wrap the stack as a functional Model: image in, validity score out.
        image = Input(shape=self.img_shape)
        validity = net(image)
        return Model(image, validity)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D
from keras import optimizers


# Hyper-parameters for the CNN + LSTM binary classifier below.
max_features = 26  # input vocabulary size -- presumably a-z, TODO confirm
embedding_size = 256
kernel_size = 5
filters = 250
pool_size = 2
lstm_output_size = 64



#print('Building model...')
# Pipeline: embedding -> dropout -> 1-D conv -> max-pool -> LSTM ->
# single sigmoid output, compiled for binary cross-entropy with Adam.
model = Sequential()
model.add(Embedding(max_features, embedding_size))
model.add(Dropout(0.2))
model.add(Conv1D(filters, kernel_size,padding ='valid',activation = 'relu',strides = 1))
model.add(MaxPooling1D(pool_size = pool_size))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss = 'binary_crossentropy',optimizer = optimizers.Adam(),metrics = ['acc'])


Esempio n. 41
0
# Synthetic dataset: 128-dim all-ones vectors labelled True, all-zeros False.
# NOTE(review): X_train is used below but defined outside this excerpt --
# presumably built like X_test with 10**4 rows per class.
X_test = numpy.array([[1] * 128] * (10 ** 2) + [[0] * 128] * (10 ** 2))

Y_train = numpy.array([True] * (10 ** 4) + [False] * (10 ** 4))
Y_test = numpy.array([True] * (10 ** 2) + [False] * (10 ** 2))

X_train = X_train.astype("float32")
X_test = X_test.astype("float32")

Y_train = Y_train.astype("bool")
Y_test = Y_test.astype("bool")

# build deep learning model
from keras.optimizers import RMSprop
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
# Two relu hidden layers with dropout, sigmoid output for binary
# classification.
model = Sequential()
# takes a 128 vector as input and outputs a 50 node layer, densely connected
model.add(Dense(50, input_dim=128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# model.add(Dense(1, init='normal')) # for regression - just end here, no sigmoid layer
model.add(Dense(1)) # for classification
model.add(Activation('sigmoid')) # for classification, must add this

rms = RMSprop()
model.compile(loss='binary_crossentropy', optimizer=rms, metrics=['accuracy'])

batch_size = 32
Esempio n. 42
0
from keras.models import load_model
import sys

from keras.models import Sequential
from keras.models import load_model

sys.path.append('./utils')
from dense import myDense
from conv2d import myConv2d
from maxpool import maxpool
from sequence import DataGenerator
from os import listdir

from pycm import ConfusionMatrix

# NOTE(review): this Sequential() is never used -- it is overwritten by the
# load_model() call just below; presumably left over from an earlier version.
model = Sequential()

# Collect encoded chest-X-ray file paths with binary labels:
# 0 = NORMAL, 1 = PNEUMONIA.
filelist = []
labels = []

for file in listdir('./encoded/NORMAL'):  # NOTE: `file` shadows a builtin name; harmless here
    filelist.append('./encoded/NORMAL/{}'.format(file))
    labels.append(0)

for file in listdir('./encoded/PNEUMONIA'):
    filelist.append('./encoded/PNEUMONIA/{}'.format(file))
    labels.append(1)

# Batch generator over (filelist, labels); DataGenerator is the local
# utils/sequence.py helper imported above.
generator = DataGenerator(filelist, labels)
model = load_model('./model.h5',
# Esempio n. 43
# 0
def generate_GRU_mode(number_classes):
    """Build and compile a 3-layer stacked-GRU text classifier.

    Embedding (MAX_FEATURES vocab, 128-dim) feeds three GRU layers of
    widening size (32/64/128); a sigmoid Dense head emits one score per
    class. Compiled with binary cross-entropy and rmsprop.

    Args:
        number_classes: number of output units in the final Dense layer.

    Returns:
        A compiled keras Sequential model.
    """
    gru_widths = (32, 64, 128)
    net = Sequential()
    net.add(Embedding(MAX_FEATURES, 128))
    for layer_idx, width in enumerate(gru_widths):
        # Only the last GRU collapses the sequence to a single vector.
        is_last = layer_idx == len(gru_widths) - 1
        net.add(GRU(width, dropout=0.2, recurrent_dropout=0.2,
                    return_sequences=not is_last))
    net.add(Dense(number_classes, activation='sigmoid'))
    net.compile(loss='binary_crossentropy',
                optimizer='rmsprop',
                metrics=['accuracy'])
    return net
    def build_generator(self):
        """Map a latent noise vector to a generated image (GAN generator).

        Three widening Dense blocks (256/512/1024), each followed by
        LeakyReLU and batch normalization, then a tanh projection
        reshaped to ``self.img_shape``.

        Returns:
            keras Model: noise of shape (latent_dim,) -> generated image.
        """
        net = Sequential()

        hidden_widths = (256, 512, 1024)
        net.add(Dense(hidden_widths[0], input_dim=self.latent_dim))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        for width in hidden_widths[1:]:
            net.add(Dense(width))
            net.add(LeakyReLU(alpha=0.2))
            net.add(BatchNormalization(momentum=0.8))

        # tanh keeps pixel values in [-1, 1]; reshape flat vector to image.
        net.add(Dense(np.prod(self.img_shape), activation='tanh'))
        net.add(Reshape(self.img_shape))

        net.summary()

        noise = Input(shape=(self.latent_dim,))
        img = net(noise)

        return Model(noise, img)
# Esempio n. 45
# 0
def run_experiment(max_len, dropout_rate, n_layers):
    """Train and evaluate a late-fusion multimodal sentiment model (MOSI).

    Per-modality sub-networks (audio: Conv1D, text: BLSTM, visual: Conv1D)
    each emit their own prediction; the predictions are concatenated and
    fused by a final Dense layer. Trains with early stopping and writes
    test metrics plus the configuration to an output file.

    Args:
        max_len: number of time steps each sequence is padded/truncated to.
        dropout_rate: dropout applied inside every sub-network.
        n_layers: only recorded in the output file name / experiment name;
            NOTE(review): no extra layers are actually stacked -- confirm
            whether this is intentional.

    Relies on module-level globals (dataset, train_ids, valid_ids,
    test_ids, mode, task, val_method, val_mode, use_PCA) and helpers
    defined elsewhere in the file (pad, convert_S5_hot, convert_pred_hot,
    sentiments).
    """

    global dataset, train_ids, valid_ids, test_ids, mode, task, val_method, val_mode, use_PCA

    # number of PCA components per modality (used only when use_PCA is True)
    visual_components = 25
    audio_components = 20
    text_components = 110

    nodes = 100
    epochs = 200
    outfile = "MOSI_sweep/late_" + mode + "_" + str(task) + "_" + str(
        n_layers) + "_" + str(max_len) + "_" + str(dropout_rate)
    experiment_prefix = "late"
    batch_size = 64
    logs_path = "regression_logs/"
    experiment_name = "{}_n_{}_dr_{}_nl_{}_ml_{}".format(
        experiment_prefix, nodes, dropout_rate, n_layers, max_len)

    def keep(vid, sid):
        """True when (vid, sid) has every modality required by `mode`.

        The mode strings are mutually exclusive, so exactly one branch
        applies per call (same behaviour as the original if-chains)."""
        text = dataset['embeddings'][vid][sid]
        if mode == "all" or mode == "AV":
            return bool(text and dataset['facet'][vid][sid]
                        and dataset['covarep'][vid][sid])
        if mode == "AT" or mode == "A":
            return bool(text and dataset['covarep'][vid][sid])
        if mode == "VT" or mode == "V":
            return bool(text and dataset['facet'][vid][sid])
        if mode == "T":
            return bool(text)
        return False

    # sort through all the (video ID, segment ID) pairs
    train_set_ids = [(vid, sid) for vid in train_ids
                     for sid in dataset['embeddings'][vid].keys()
                     if keep(vid, sid)]
    valid_set_ids = [(vid, sid) for vid in valid_ids
                     for sid in dataset['embeddings'][vid].keys()
                     if keep(vid, sid)]
    # test ids may be missing from the embeddings dict entirely
    test_set_ids = [(vid, sid) for vid in test_ids
                    if vid in dataset['embeddings']
                    for sid in dataset['embeddings'][vid].keys()
                    if keep(vid, sid)]

    def stacked(feature, ids):
        """Pad each selected segment of `feature` to max_len and stack
        into one (n_samples, max_len, feature_dim) array."""
        return np.stack([pad(dataset[feature][vid][sid], max_len)
                         for (vid, sid) in ids
                         if dataset[feature][vid][sid]],
                        axis=0)

    # partition the training, valid and test sets; all sequences are
    # padded/truncated to max_len steps
    if mode == "all" or mode == "AV" or mode == "AT":
        train_set_audio = stacked('covarep', train_set_ids)
        valid_set_audio = stacked('covarep', valid_set_ids)
        test_set_audio = stacked('covarep', test_set_ids)
    if mode == "all" or mode == "VT" or mode == "AV":
        train_set_visual = stacked('facet', train_set_ids)
        valid_set_visual = stacked('facet', valid_set_ids)
        test_set_visual = stacked('facet', test_set_ids)
    if mode == "all" or mode == "VT" or mode == "AT":
        train_set_text = stacked('embeddings', train_set_ids)
        valid_set_text = stacked('embeddings', valid_set_ids)
        test_set_text = stacked('embeddings', test_set_ids)

    if task == "SB":
        # binarize the sentiment scores for the binary classification task
        y_train = np.array(
            [sentiments[vid][sid] for (vid, sid) in train_set_ids]) > 0
        y_valid = np.array(
            [sentiments[vid][sid] for (vid, sid) in valid_set_ids]) > 0
        y_test = np.array(
            [sentiments[vid][sid] for (vid, sid) in test_set_ids]) > 0

    if task == "SR":
        # raw sentiment scores for regression
        y_train = np.array(
            [sentiments[vid][sid] for (vid, sid) in train_set_ids])
        y_valid = np.array(
            [sentiments[vid][sid] for (vid, sid) in valid_set_ids])
        y_test = np.array(
            [sentiments[vid][sid] for (vid, sid) in test_set_ids])

    if task == "S5":
        # 5-class one-hot targets
        y_train = convert_S5_hot(np.array(
            [sentiments[vid][sid] for (vid, sid) in train_set_ids]))
        y_valid = convert_S5_hot(np.array(
            [sentiments[vid][sid] for (vid, sid) in valid_set_ids]))
        y_test = convert_S5_hot(np.array(
            [sentiments[vid][sid] for (vid, sid) in test_set_ids]))

    def normalize(train, valid, test):
        """Scale all splits by the train split's per-feature absolute
        maximum and zero out NaNs (`x != x` is the NaN test)."""
        split_max = np.max(np.max(np.abs(train), axis=0), axis=0)
        split_max[split_max == 0] = 1  # if the maximum is 0 we don't normalize
        train = train / split_max
        valid = valid / split_max
        test = test / split_max
        train[train != train] = 0
        valid[valid != valid] = 0
        test[test != test] = 0
        return train, valid, test

    # normalize covarep and facet features, remove possible NaN values
    if mode == "all" or mode == "AV" or mode == "VT":
        train_set_visual, valid_set_visual, test_set_visual = normalize(
            train_set_visual, valid_set_visual, test_set_visual)
    if mode == "all" or mode == "AT" or mode == "AV":
        # Fixed: the original skipped the zero-maximum guard for audio,
        # which could divide by zero; normalize() now applies it uniformly.
        train_set_audio, valid_set_audio, test_set_audio = normalize(
            train_set_audio, valid_set_audio, test_set_audio)

    def pca_reduce(train, valid, test, n_components):
        """Fit PCA on flattened train frames, project all three splits,
        and restore the (n_samples, max_len, n_components) shape."""
        arrays = (train, valid, test)
        shapes = [a.shape for a in arrays]
        flat = [a.reshape(s[0] * s[1], s[2]) for a, s in zip(arrays, shapes)]
        pca = decomposition.PCA(n_components=n_components)
        projected = [pca.fit_transform(flat[0]),
                     pca.transform(flat[1]),
                     pca.transform(flat[2])]
        return [p.reshape(s[0], s[1], n_components)
                for p, s in zip(projected, shapes)]

    if use_PCA == True:
        if mode == "all" or mode == "AV" or mode == "VT":
            train_set_visual, valid_set_visual, test_set_visual = pca_reduce(
                train_set_visual, valid_set_visual, test_set_visual,
                visual_components)
        if mode == "all" or mode == "AT" or mode == "AV":
            train_set_audio, valid_set_audio, test_set_audio = pca_reduce(
                train_set_audio, valid_set_audio, test_set_audio,
                audio_components)
        if mode == "all" or mode == "AT" or mode == "VT":
            train_set_text, valid_set_text, test_set_text = pca_reduce(
                train_set_text, valid_set_text, test_set_text,
                text_components)

    k = 3  # Conv1D kernel size
    m = 2  # MaxPooling1D pool size
    if task == "SB":
        val_method = "val_acc"
        val_mode = "max"
        emote_final = 'sigmoid'
        last_node = 1
    if task == "SR":
        val_method = "val_loss"
        val_mode = "min"
        emote_final = 'linear'
        last_node = 1
    if task == "S5":
        val_method = "val_acc"
        val_mode = "max"
        emote_final = 'softmax'
        last_node = 5

    def conv_branch(feature_dim):
        """Conv1D -> pool -> flatten -> dropout -> dense sub-network
        (shared shape for the audio and visual branches)."""
        branch_in = Input(shape=(max_len, feature_dim))
        x = Conv1D(filters=64, kernel_size=k, activation='relu')(branch_in)
        x = MaxPooling1D(m)(x)
        x = Flatten()(x)
        x = Dropout(dropout_rate)(x)
        x = Dense(nodes, activation="relu")(x)
        return branch_in, Dense(last_node, activation=emote_final)(x)

    # AUDIO
    if mode == "all" or mode == "AT" or mode == "AV":
        model1_in, model1_out = conv_branch(train_set_audio.shape[2])

    # TEXT = BLSTM from unimodal
    if mode == "all" or mode == "AT" or mode == "VT":
        model2_in = Input(shape=(max_len, train_set_text.shape[2]))
        x = Bidirectional(LSTM(64))(model2_in)
        x = Dropout(dropout_rate)(x)
        x = Dense(nodes, activation="relu")(x)
        model2_out = Dense(last_node, activation=emote_final)(x)

    # VIDEO - CNN from unimodal
    if mode == "all" or mode == "AV" or mode == "VT":
        model3_in, model3_out = conv_branch(train_set_visual.shape[2])

    # late fusion: concatenate the per-modality predictions
    if mode == "all":
        concatenated = concatenate([model1_out, model2_out, model3_out])
    if mode == "AV":
        concatenated = concatenate([model1_out, model3_out])
    if mode == "AT":
        concatenated = concatenate([model1_out, model2_out])
    if mode == "VT":
        concatenated = concatenate([model2_out, model3_out])

    out_layer = Dense(last_node, activation=emote_final)(concatenated)

    if mode == "all":
        merged_model = Model([model1_in, model2_in, model3_in], out_layer)
    if mode == "AV":
        merged_model = Model([model1_in, model3_in], out_layer)
    if mode == "AT":
        merged_model = Model([model1_in, model2_in], out_layer)
    if mode == "VT":
        merged_model = Model([model2_in, model3_in], out_layer)

    if task == "SB":
        merged_model.compile('adam',
                             'binary_crossentropy',
                             metrics=['accuracy'])
    if task == "S5":
        # Fixed: the softmax 5-class head was compiled with
        # binary_crossentropy; categorical_crossentropy matches the
        # one-hot targets produced by convert_S5_hot.
        merged_model.compile('adam',
                             'categorical_crossentropy',
                             metrics=['accuracy'])
    if task == "SR":
        merged_model.compile('adam', loss='mean_absolute_error')

    if mode == "all":
        x_train = [train_set_audio, train_set_text, train_set_visual]
        x_valid = [valid_set_audio, valid_set_text, valid_set_visual]
        x_test = [test_set_audio, test_set_text, test_set_visual]
    if mode == "AV":
        x_train = [train_set_audio, train_set_visual]
        x_valid = [valid_set_audio, valid_set_visual]
        x_test = [test_set_audio, test_set_visual]
    if mode == "AT":
        x_train = [train_set_audio, train_set_text]
        x_valid = [valid_set_audio, valid_set_text]
        x_test = [test_set_audio, test_set_text]
    if mode == "VT":
        x_train = [train_set_text, train_set_visual]
        x_valid = [valid_set_text, valid_set_visual]
        x_test = [test_set_text, test_set_visual]

    early_stopping = EarlyStopping(monitor=val_method,
                                   min_delta=0,
                                   patience=10,
                                   verbose=1,
                                   mode=val_mode)
    callbacks_list = [early_stopping]
    merged_model.fit(x_train,
                     y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     # Fixed: validation_data must be a tuple, not a list
                     validation_data=(x_valid, y_valid),
                     callbacks=callbacks_list)
    preds = merged_model.predict(x_test)
    # Fixed: open in text mode ("w", was "wb") -- everything written below
    # is str, which a binary-mode file rejects under Python 3.
    out = open(outfile, "w")

    # Fixed: Python 2 print statements converted to print() calls so the
    # file parses under Python 3 (the rest of the file is Python 3).
    print("testing output before eval metrics calcs..")
    print(y_test[0])
    print(preds[0])

    if task == "SR":
        preds = np.concatenate(preds)
        mae = sklearn.metrics.mean_absolute_error(y_test, preds)
        r = scipy.stats.pearsonr(y_test, preds)
        out.write("Test MAE: " + str(mae) + "\n")
        out.write("Test CORR: " + str(r) + "\n")
    if task == "S5":
        preds = convert_pred_hot(preds)
        acc = sklearn.metrics.accuracy_score(y_test, preds)
        out.write("Test ACC: " + str(acc) + "\n")
    if task == "SB":
        acc = np.mean((preds > 0.5) == y_test.reshape(-1, 1))
        preds = np.concatenate(preds)
        preds = preds > 0.5
        f1 = sklearn.metrics.f1_score(y_test, preds)
        out.write("Test ACC: " + str(acc) + "\n")
        out.write("Test F1: " + str(f1) + "\n")

    # record the experiment configuration alongside the metrics
    out.write("use_PCA=" + str(use_PCA) + "\n")
    out.write("dropout_rate=" + str(dropout_rate) + "\n")
    out.write("n_layers=" + str(n_layers) + "\n")
    out.write("max_len=" + str(max_len) + "\n")
    out.write("nodes=" + str(nodes) + "\n")
    out.write("task=" + str(task) + "\n")
    out.write("mode=" + str(mode) + "\n")
    out.write("num_train=" + str(len(train_set_ids)) + "\n")
    out.write("num_valid=" + str(len(valid_set_ids)) + "\n")
    out.write("num_test=" + str(len(test_set_ids)) + "\n")
    out.close()
# Esempio n. 46
# 0
def generate_BiLSTM_model(number_classes):
    """Build and compile a single-layer bidirectional-LSTM classifier.

    Embedding (MAX_FEATURES vocab, 128-dim) -> BiLSTM(64, concat merge)
    -> Dropout(0.5) -> sigmoid Dense head, compiled with binary
    cross-entropy and adamax.

    Args:
        number_classes: number of output units in the final Dense layer.

    Returns:
        A compiled keras Sequential model.
    """
    net = Sequential()
    net.add(Embedding(MAX_FEATURES, 128))
    encoder = Bidirectional(LSTM(64, activation='tanh'), merge_mode='concat')
    net.add(encoder)
    net.add(Dropout(0.5))
    net.add(Dense(number_classes, activation='sigmoid'))
    net.compile(loss='binary_crossentropy',
                optimizer='adamax',
                metrics=['accuracy'])
    return net
def build_CNN(hparams):
    """Build and compile a small VGG-style CNN for 32x32 RGB images
    (10 classes, e.g. CIFAR-10).

    ``hparams`` is a positional hyper-parameter vector:
        hparams[0] -- Adam learning rate
        hparams[1] -- filters in convolution block 1
        hparams[2] -- filters in convolution block 2
        hparams[3] -- number of hidden Dense layers
        hparams[4] -- units per hidden Dense layer
        hparams[5] -- activation for the hidden Dense layers
        hparams[6] -- dropout rate (after each conv block and the dense stack)

    Returns:
        A compiled keras Sequential model.
    """
    img_size = 32
    num_classes = 10

    # Name the positional hyper-parameters once instead of indexing
    # hparams by magic number throughout the body.
    learning_rate = hparams[0]
    block1_filters = hparams[1]
    block2_filters = hparams[2]
    num_dense_layers = hparams[3]
    dense_units = hparams[4]
    dense_activation = hparams[5]
    dropout_rate = hparams[6]

    model = Sequential()

    # Block 1: two 3x3 convolutions followed by 2x2 max-pool + dropout.
    model.add(
        Conv2D(filters=block1_filters,
               kernel_size=(3, 3),
               padding='same',
               input_shape=(img_size, img_size, 3),
               kernel_initializer='he_normal',
               activation='relu'))
    model.add(
        Conv2D(filters=block1_filters,
               kernel_size=(3, 3),
               kernel_initializer='he_normal',
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout_rate))

    # Block 2: same structure with its own filter count.
    model.add(
        Conv2D(filters=block2_filters,
               kernel_size=(3, 3),
               padding='same',
               kernel_initializer='he_normal',
               activation='relu'))
    model.add(
        Conv2D(filters=block2_filters,
               kernel_size=(3, 3),
               kernel_initializer='he_normal',
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout_rate))

    # Classifier head: configurable stack of Dense layers, then softmax.
    model.add(Flatten())
    for _ in range(num_dense_layers):
        model.add(Dense(dense_units, activation=dense_activation))

    model.add(Dropout(dropout_rate))
    model.add(Dense(num_classes, activation='softmax'))

    optimizer = Adam(lr=learning_rate)

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
# Esempio n. 48
# 0
def generate_LSTM_model(number_classes):
    """Build and compile a 2-layer stacked-LSTM text classifier.

    Embedding (MAX_FEATURES vocab, 128-dim) -> LSTM(32, sequences) ->
    LSTM(64) -> sigmoid Dense head.

    Args:
        number_classes: number of output units in the final Dense layer.

    Returns:
        A compiled keras Sequential model.
    """
    model = Sequential()
    model.add(Embedding(MAX_FEATURES, 128))
    model.add(LSTM(32,
                   dropout=0.2,
                   recurrent_dropout=0.2,
                   activation='tanh',
                   return_sequences=True))
    model.add(LSTM(64,
                   dropout=0.2,
                   recurrent_dropout=0.2,
                   activation='tanh'))
    model.add(Dense(number_classes, activation='sigmoid'))

    # Fixed: original compiled with 'categorical_crossentropy', which is
    # inconsistent with the sigmoid output (per-class independent
    # probabilities) and with the sibling generate_GRU_mode /
    # generate_BiLSTM_model builders, which pair sigmoid with
    # binary_crossentropy.
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model