    def __init__(self,
                 cnn_rand=True,
                 STATIC=False,
                 ExternalEmbeddingModel=None,
                 n_symbols=None,
                 wordmap=None,
                 vocabsize=50000):
        # Model hyperparameters
        self.embedding_dim = 300
        self.filter_sizes = (3, 8)
        self.num_filters = 10
        self.hidden_dims = 100
        self.dropout_prob = (0.5, 0.8)
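        # dropout_prob[0] is applied after the embedding, dropout_prob[1] before the hidden Dense layer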
        self.loss = 'categorical_crossentropy'
        self.optimizer = 'rmsprop'
        self.l1_reg = 0
        self.l2_reg = 3  ## l2 penalty of 3, following Kim (2014)
        self.std = 0.05  ## standard deviation for weight initialization
        # Training Parameters
        self.set_training_paramters(batch_size=64, num_epochs=10)
        self.set_processing_parameters(
            sequence_length=100,
            vocab_size=vocabsize)  ## changed to fit short text
        # Defining Model Layers
        if cnn_rand:
            ##Embedding Layer Randomly initialized
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=self.vocab_size)
            Classes = dh.read_labels()
            n_classes = len(Classes)

        else:
            ## Use pretrained model
            #n_symbols, wordmap = dh.get_word_map_num_symbols()
            self.set_etxrernal_embedding(ExternalEmbeddingModel)
            vecDic = dh.GetVecDicFromGensim(self.ExternalEmbeddingModel)
            Classes = dh.read_labels()
            n_classes = len(Classes)
            ## Define Embedding Layer
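            # Assumption about the dh helper: GetEmbeddingWeights returns an
            # (n_symbols, embedding_dim) matrix whose row wordmap[word] holds
            # that word's vector from vecDic.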
            embedding_weights = dh.GetEmbeddingWeights(
                embedding_dim=self.embedding_dim,
                n_symbols=n_symbols,
                wordmap=wordmap,
                vecDic=vecDic)
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=n_symbols,
                                        trainable=not STATIC)  # freeze the weights when STATIC is True
            embedding_layer.build(
                (None, ))  # build the layer so set_weights() can be called below
            embedding_layer.set_weights([embedding_weights])

        Sequence_in = Input(shape=(self.sequence_length, ), dtype='int32')
        embedding_seq = embedding_layer(Sequence_in)
        x = Dropout(self.dropout_prob[0])(embedding_seq)
        ## define core convolutional layers
        conv_blocks = []
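        # one Conv1D -> MaxPooling1D -> Flatten block per filter size (parallel n-gram detectors, Kim-style)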
        for sz in self.filter_sizes:
            conv = Convolution1D(filters=self.num_filters,
                                 kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(x)
            conv = MaxPooling1D(pool_size=2)(conv)
            conv = Flatten()(conv)
            conv_blocks.append(conv)

        x = Concatenate()(
            conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
        x = Dropout(self.dropout_prob[1])(x)
        x = Dense(self.hidden_dims,
                  activation="relu",
                  kernel_initializer=RandomNormal(stddev=self.std),
                  kernel_regularizer=L1L2(l1=self.l1_reg, l2=self.l2_reg))(x)
        preds = Dense(n_classes, activation='softmax')(x)
        ## build and compile the graph model
        model = Model(Sequence_in, preds)
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=['accuracy'])
        self.model = model
    def __init__(self,
                 BiGRU_rand=True,
                 STATIC=False,
                 ExternalEmbeddingModel=None,
                 n_symbols=None,
                 wordmap=None):
        self.embedding_dim = 300
        self.hidden_dims = 100
        self.dropout_prob = (0.5, 0.8)
        self.loss = 'categorical_crossentropy'
        self.optimizer = 'rmsprop'
        self.l1_reg = 0
        self.l2_reg = 3  ## l2 penalty of 3, following Kim (2014)
        self.std = 0.05  ## standard deviation for weight initialization
        # Training Parameters
        self.set_training_paramters(batch_size=64, num_epochs=10)
        self.set_processing_parameters(
            sequence_length=30, vocab_size=50000)  ## changed to fit short text
        # Defining Model Layers
        if BiGRU_rand:
            ##Embedding Layer Randomly initialized
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=self.vocab_size)
            Classes = dh.read_labels()
            n_classes = len(Classes)

        else:
            ## Use pretrained model
            #n_symbols, wordmap = dh.get_word_map_num_symbols()
            self.set_etxrernal_embedding(ExternalEmbeddingModel)
            vecDic = dh.GetVecDicFromGensim(self.ExternalEmbeddingModel)
            Classes = dh.read_labels()
            n_classes = len(Classes)
            ## Define Embedding Layer
            embedding_weights = dh.GetEmbeddingWeights(
                embedding_dim=self.embedding_dim,
                n_symbols=n_symbols,
                wordmap=wordmap,
                vecDic=vecDic)
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=n_symbols,
                                        trainable=not STATIC)  # freeze the weights when STATIC is True
            embedding_layer.build(
                (None, ))  # build the layer so set_weights() can be called below
            embedding_layer.set_weights([embedding_weights])

        Sequence_in = Input(shape=(self.sequence_length, ), dtype='int32')
        embedding_seq = embedding_layer(Sequence_in)
        x = Dropout(self.dropout_prob[0])(embedding_seq)
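        # single BiGRU layer; return_sequences=False keeps only the final forward and backward states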
        x = Bidirectional(
            GRU(self.hidden_dims,
                kernel_initializer=RandomNormal(stddev=self.std),
                kernel_regularizer=L1L2(l1=self.l1_reg, l2=self.l2_reg),
                return_sequences=False))(x)
        preds = Dense(n_classes, activation='softmax')(x)
        ## build and compile the graph model
        model = Model(Sequence_in, preds)
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=['accuracy'])
        self.model = model
    def __init__(self,
                 att_rand=True,
                 ExternalEmbeddingModel=None,
                 n_symbols=None,
                 wordmap=None,
                 STATIC=True):
        self.dropout_prob = (0.36, 0.36)
        self.hidden_dims = 100
        self.std = 0.05
        self.l1_reg = 3
        self.l2_reg = 3
        self.loss = 'categorical_crossentropy'
        self.optimizer = 'rmsprop'
        self.sequence_length = 30
        self.embedding_dim = 300
        self.vocab_size = 50000
        self.num_epochs = 5
        self.batch_size = 64

        ## Define the attention BiGRU model

        if att_rand:
            ##Embedding Layer Randomly initialized
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=self.vocab_size)

        else:
            ## Use pretrained model
            #n_symbols, wordmap = dh.get_word_map_num_symbols()
            self.set_etxrernal_embedding(ExternalEmbeddingModel)
            vecDic = dh.GetVecDicFromGensim(self.ExternalEmbeddingModel)
            ## Define Embedding Layer
            embedding_weights = dh.GetEmbeddingWeights(
                embedding_dim=self.embedding_dim,
                n_symbols=n_symbols,
                wordmap=wordmap,
                vecDic=vecDic)
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=n_symbols,
                                        trainable=not STATIC)  # freeze the weights when STATIC is True
            embedding_layer.build(
                (None, ))  # build the layer so set_weights() can be called below
            embedding_layer.set_weights([embedding_weights])

        ###################################################
        SeqIn = Input(shape=(self.sequence_length, ), dtype='int32')
        embedding_seq = embedding_layer(SeqIn)
        M1 = Dropout(self.dropout_prob[0])(embedding_seq)
        #M1 = Activation('tanh')(embedding_seq)
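        # return_sequences=True keeps one activation per timestep so the attention block can weight them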
        activations = Bidirectional(
            GRU(self.hidden_dims,
                kernel_initializer=RandomNormal(stddev=self.std),
                kernel_regularizer=L1L2(l1=self.l1_reg, l2=self.l2_reg),
                return_sequences=True))(M1)

        ## Time-distributed Dense scores each timestep's activation

        attention = TimeDistributed(Dense(1, activation='tanh'))(activations)
        attention = Flatten()(attention)
        attention = Activation('softmax')(attention)
        attention = RepeatVector(2 * (self.hidden_dims))(attention)
        attention = Permute([2, 1])(attention)
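        # attention now has shape (batch, timesteps, 2 * hidden_dims), matching the BiGRU activations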

        # apply the attention weights to the BiGRU activations

        sent_representation = Multiply()([activations, attention])  # element-wise product
        sent_representation = Lambda(lambda xin: K.sum(xin, axis=1))(
            sent_representation)  # sum over the time axis

        Classes = dh.read_labels()
        n_classes = len(Classes)
        preds = Dense(n_classes, activation='softmax')(sent_representation)

        model = Model(inputs=SeqIn, outputs=preds)
        model.compile(optimizer=self.optimizer,
                      loss=self.loss,
                      metrics=['accuracy'])

        self.model = model
    def __init__(self,
                 crepe_rand=True,
                 STATIC=False,
                 ExternalEmbeddingModel=None,
                 n_symbols=None,
                 wordmap=None,
                 vocabsize=None,
                 maxseq=None,
                 embedding_dim=None):
        '''
        Deep CNN for text classification, based on the Crepe architecture (Zhang & LeCun, 2015).
        '''

        self.embedding_dim = embedding_dim
        self.filter_kernels = [7, 7, 3, 3, 3, 3]
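        # kernel sizes of the six stacked convolutions, following the Crepe architecture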
        self.nb_filters = 256
        self.batch_size = 80
        self.nb_epochs = 10
        self.std = 0.05
        self.dropout_prob = (0.5, 0.8)
        self.hidden_dim = 300
        self.loss = 'categorical_crossentropy'
        self.optimizer = 'rmsprop'
        # Set training parameters

        self.set_training_paramters(batch_size=self.batch_size,
                                    num_epochs=self.nb_epochs)
        self.set_processing_parameters(sequence_length=maxseq,
                                       vocab_size=vocabsize)

        Classes = dh.read_labels()
        n_classes = len(Classes)

        if crepe_rand:
            ##Embedding Layer Randomly initialized
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=self.vocab_size)

        else:
            ## Use pretrained model
            # n_symbols, wordmap = dh.get_word_map_num_symbols()
            self.set_etxrernal_embedding(ExternalEmbeddingModel)
            vecDic = dh.GetVecDicFromGensim(self.ExternalEmbeddingModel)
            ## Define Embedding Layer
            embedding_weights = dh.GetEmbeddingWeights(
                embedding_dim=self.embedding_dim,
                n_symbols=n_symbols,
                wordmap=wordmap,
                vecDic=vecDic)
            embedding_layer = Embedding(output_dim=self.embedding_dim,
                                        input_dim=n_symbols,
                                        trainable=not STATIC)  # freeze the weights when STATIC is True
            embedding_layer.build(
                (None, ))  # build the layer so set_weights() can be called below
            embedding_layer.set_weights([embedding_weights])

        SequenceIn = Input(shape=(self.sequence_length, ), dtype='int32')
        embedding_seq = embedding_layer(SequenceIn)
        x = Dropout(self.dropout_prob[0])(embedding_seq)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[0],
                          padding='valid',
                          activation='relu')(x)
        #x = MaxPooling1D(pool_size=3)(x)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[1],
                          padding='valid',
                          activation='relu')(x)
        #x = MaxPooling1D(pool_size=4)(x)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[2],
                          padding='valid',
                          activation='relu')(x)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[3],
                          padding='valid',
                          activation='relu')(x)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[4],
                          padding='valid',
                          activation='relu')(x)
        x = Convolution1D(filters=self.nb_filters,
                          kernel_size=self.filter_kernels[5],
                          padding='valid',
                          activation='relu')(x)
        x = MaxPooling1D(pool_size=3)(x)
        x = Flatten()(x)
        x = Dense(self.hidden_dim, activation='relu')(x)
        x = Dropout(self.dropout_prob[1])(x)
        x = Dense(self.hidden_dim, activation='relu')(x)
        x = Dropout(self.dropout_prob[1])(x)
        preds = Dense(n_classes, activation='softmax')(x)
        ## build and compile the graph model
        model = Model(SequenceIn, preds)
        model.compile(loss=self.loss,
                      optimizer=self.optimizer,
                      metrics=['accuracy'])
        self.model = model
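
        # Usage sketch (the class name and the training arrays below are assumptions,
        # not part of this module):
        #   clf = CrepeClassifier(crepe_rand=True, vocabsize=50000, maxseq=256, embedding_dim=300)
        #   clf.model.fit(x_train, y_train, batch_size=clf.batch_size, epochs=clf.nb_epochs)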