Example #1
    def fit(self, X, y, X_t, y_t):

        X = self.transfer_shape(X)
        X_t = self.transfer_shape(X_t)
        y = y[1:]
        y_t = y_t[1:]
        #self.classifier.add(Dense(500, input_shape=( X.shape[1],)))
        self.classifier.add(LSTM(8, input_shape=(2, X.shape[2]),
                                 return_sequences=True, kernel_initializer=initializers.glorot_normal(123)))
        self.classifier.add(Flatten())
        self.classifier.add(Activation('relu'))
        self.classifier.add(Dropout(0.3, seed=1234))
        self.classifier.add(Dense(4, kernel_initializer=initializers.glorot_normal(123)))
        self.classifier.add(Activation('relu'))
        self.classifier.add(Dropout(0.3, seed=123))
        self.classifier.add(Dense(4, kernel_initializer=initializers.glorot_normal(123)))
        self.classifier.add(Activation('relu'))
        self.classifier.add(Dropout(0.3, seed=123))
        self.classifier.add(Dense(4))
        self.classifier.add(Activation('tanh'))
        #self.classifier.add(Activation('relu'))
        self.classifier.add(Dense(1, kernel_initializer=initializers.glorot_normal(123)))
        self.classifier.add(Activation('sigmoid'))
        #sgd = SGD(lr=0.01)
        #opt = Adam(lr=4e-5)
        opt = Adam()
        #opt = RMSprop(lr=4e-3)
        #opt = Adadelta()

        self.classifier.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
        self.classifier.fit(X, y, validation_data=(X_t, y_t), batch_size=self.batch_size, epochs=self.nb_epoch)
Example #2
def Model1(embedding_matrix=None):
    # Keras expects `weights` to be a list of weight arrays, so wrap the matrix.
    embedding_layer = Embedding(len(word_index) + 1,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix] if embedding_matrix is not None else None,
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    net = SpatialDropout1D(0.2)(embedded_sequences)
    net = Bidirectional(layer=GRU(EMBEDDING_DIM,
                                  return_sequences=True,
                                  kernel_initializer=glorot_normal(seed=1029),
                                  recurrent_initializer=orthogonal(gain=1.0,
                                                                   seed=1029)),
                        name='bidirectional_gru')(net)
    # net = Bidirectional(
    #         layer=LSTM(EMBEDDING_DIM, return_sequences=True,
    #                         kernel_initializer=glorot_normal(seed=1029),
    #                         recurrent_initializer=orthogonal(gain=1.0, seed=1029)),
    #         name='bidirectional_lstm')(net)
    #net = BatchNormalization()(net)

    capsul = Capsule(num_capsule=10,
                     dim_capsule=10,
                     routings=4,
                     share_weights=True)(net)  # noqa
    capsul = Flatten()(capsul)
    capsul = DropConnect(Dense(8, activation="relu"), prob=0.01)(capsul)
    atten = Attention(step_dim=MAX_SEQUENCE_LENGTH, name='attention')(net)
    atten = DropConnect(Dense(4, activation="relu"), prob=0.2)(atten)
    net = Concatenate(axis=-1)([capsul, atten])
    # net = GlobalAveragePooling1D()(net)
    # output = Dense(units=1, activation='sigmoid', name='output')(net)
    # net = GlobalAveragePooling1D()(net)
    net = Dense(100, activation='relu')(net)
    # net = Dropout(0.3)(net)
    output = Dense(2, activation='softmax')(net)
    model2 = Model(inputs=sequence_input, outputs=output)
    model2.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['acc'])
    model2.summary()
    # Keeping a checkpoint to store only the model which gives best output validation accuracy
    chkpt2 = ModelCheckpoint('expertiza_nn_model2.h5',
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True)
    return (model2, chkpt2)
Example #3
    def create_sharedLSTMmodel(self):
        inputs = Input(shape=(self.maxlen, self.height, self.width, 3))

        x = TimeDistributed(self.resnet, name="resnet")(inputs)
        x = TimeDistributed(GlobalAveragePooling2D(), name="GAP")(x)
        x = TimeDistributed(Dense(512, activation='relu'), name="dense")(x)
        predictions = Bidirectional(
            LSTM(128,
                 batch_input_shape=(None, self.maxlen, 512),
                 kernel_initializer=glorot_normal(seed=20181020),
                 recurrent_initializer=orthogonal(gain=1.0, seed=20181020),
                 dropout=0.01,
                 recurrent_dropout=0.01))(x)
        predictions = Reshape((1, 256))(predictions)
        shared_layers = Model(inputs, predictions, name="shared_LSTMlayers")
        return shared_layers
Example #4
    def __init__(self,
                 lambda_initializer=initializers.Constant(value=1),
                 t_initializer=initializers.glorot_normal(),
                 shared_axes=None,
                 **kwargs):
        super(BHSA, self).__init__(**kwargs)

        self.lambda_initializer = initializers.get(lambda_initializer)
        self.t_initializer = initializers.get(t_initializer)

        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
Example #5
def CNN___itworks(input_shape, n_class, CNNkernel, CNNChannel, DenseChannel):
    # ex_name: 0715
    cnn_init = initializers.glorot_normal()
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(19,19), strides=(1,1),
        activation='relu', input_shape=input_shape,
        kernel_initializer=cnn_init, padding='valid'))
    #model.add(MaxPooling2D(pool_size=(1, 3), strides=(1, 3)))
    model.add(Conv2D(32, (19,19), activation='relu', kernel_initializer=cnn_init, padding='valid'))
    #model.add(Conv2D(32, (5,5), activation='relu',kernel_initializer=cnn_init, padding='valid'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_class, activation='softmax'))
    return model
Example #6
def FireModule(s_1x1, e_1x1, e_3x3, name):
    """FireModule
        Fire module for the SqueezeNet model. 
        Implements the expand layer, which has a mix of 1x1 and 3x3 filters, 
        by using two conv layers concatenated in the channel dimension. 
    :param s_1x1: Number of 1x1 filters in the squeeze layer
    :param e_1x1: Number of 1x1 filters in the expand layer
    :param e_3x3: Number of 3x3 filters in the expand layer
    :param name: Name of the fire module
    :return: 
        Returns a callable function
    """
    # Concat on the channel axis. TensorFlow uses (rows, cols, channels),
    # while Theano uses (channels, rows, cols).

    concat_axis = 3
    initializer = initializers.glorot_normal()

    def layer(x):
        squeeze = Convolution2D(s_1x1,
                                kernel_size=(1, 1),
                                activation='relu',
                                kernel_initializer=initializer,
                                name=name + '/squeeze1x1')(x)
        squeeze = BatchNormalization(name=name + '/squeeze1x1_bn')(squeeze)

        # Needed to merge layers expand_1x1 and expand_3x3.
        expand_1x1 = Convolution2D(e_1x1,
                                   kernel_size=(1, 1),
                                   activation='relu',
                                   kernel_initializer=initializer,
                                   name=name + '/expand1x1')(squeeze)

        # Pad the border with zeros. Not needed as padding='same' will do the same.
        # expand_3x3 = ZeroPadding2D(padding=(1, 1), name=name+'_expand_3x3_padded')(squeeze)
        expand_3x3 = Convolution2D(e_3x3,
                                   kernel_size=(3, 3),
                                   padding='same',
                                   activation='relu',
                                   kernel_initializer=initializer,
                                   name=name + '/expand3x3')(squeeze)
        # Concat in the channel dim
        expand_merge = concatenate([expand_1x1, expand_3x3],
                                   axis=concat_axis,
                                   name=name + '/concat')
        return expand_merge

    return layer
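FireModule returns a closure over its parameters, so modules chain functionally like ordinary layers. A minimal usage sketch (the 224x224x3 input and the fire2/fire3 sizes are illustrative, not from the source; Input, MaxPooling2D and Model are the standard Keras names):

from keras.layers import Input, MaxPooling2D
from keras.models import Model

inputs = Input(shape=(224, 224, 3))
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(inputs)
x = FireModule(s_1x1=16, e_1x1=64, e_3x3=64, name='fire2')(x)
x = FireModule(s_1x1=16, e_1x1=64, e_3x3=64, name='fire3')(x)
# each module emits e_1x1 + e_3x3 = 128 channels after the concat
model = Model(inputs, x)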
Example #7
def ref_crnn(input_shape, n_class, model_size_info):
    cprint('**** CRNN ****', 'green')
    assert (len(model_size_info) == 9)
    cnn_info = model_size_info[:5]
    rnn_info = model_size_info[5:8]
    fc_unit = model_size_info[8]
    init = initializers.glorot_normal()
    # MODEL
    model = Sequential()
    model.add(
        Conv2D(cnn_info[0],
               kernel_size=(cnn_info[1], cnn_info[2]),
               strides=(cnn_info[3], cnn_info[4]),
               activation='relu',
               input_shape=input_shape,
               kernel_initializer=init,
               padding='valid'))
    model.add(layers.TimeDistributed(Flatten()))
    for i in range(rnn_info[0] - 1):
        if rnn_info[2] == 0:
            model.add(
                layers.Bidirectional(
                    layers.LSTM(rnn_info[1], return_sequences=True)))
        elif rnn_info[2] == 1:
            model.add(
                layers.Bidirectional(
                    layers.GRU(rnn_info[1], return_sequences=True)))
        else:
            raise ValueError('wrong type name')
    if rnn_info[2] == 0:
        model.add(layers.Bidirectional(layers.LSTM(rnn_info[1])))
    elif rnn_info[2] == 1:
        model.add(layers.Bidirectional(layers.GRU(rnn_info[1])))
    else:
        raise ValueError('wrong type name')
    model.add(
        Dense(fc_unit,
              activation='relu',
              kernel_initializer=init,
              bias_initializer='zeros'))
    model.add(
        Dense(n_class,
              activation='softmax',
              kernel_initializer=init,
              bias_initializer='zeros'))
    return model
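To make the 9-element model_size_info layout concrete, here is a hypothetical call (all sizes are illustrative): the first five entries configure the Conv2D (channels, kernel, stride), the next three the recurrent stack (layer count, units, cell type with 0 = LSTM and 1 = GRU), and the last one the fully connected width.

# [conv_ch, kernel_t, kernel_f, stride_t, stride_f,
#  rnn_layers, rnn_units, rnn_type, fc_units]
model = ref_crnn(input_shape=(98, 40, 1), n_class=12,
                 model_size_info=[32, 20, 8, 1, 1, 2, 64, 1, 128])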
Example #8
def CNN(input_shape, n_class, CNNkernel, CNNChannel, DenseChannel):
    # ex_name: 0715
    cprint(str(CNNkernel) + str(CNNChannel) + str(DenseChannel), 'yellow')
    cnn_init = initializers.glorot_normal()
    model = Sequential()
    model.add(Conv2D(CNNChannel, kernel_size=(CNNkernel,CNNkernel), strides=(1,1),
        activation='relu', input_shape=input_shape,
        kernel_initializer=cnn_init, padding='valid'))
    model.add(MaxPooling2D(pool_size=(1, 3), strides=(1, 3)))
    model.add(Conv2D(CNNChannel, (CNNkernel,CNNkernel), activation='relu', kernel_initializer=cnn_init, padding='valid'))
    model.add(Conv2D(CNNChannel, (CNNkernel,CNNkernel), activation='relu',kernel_initializer=cnn_init, padding='valid'))
    model.add(Flatten())
    model.add(Dense(DenseChannel, activation='relu'))
    model.add(Dense(DenseChannel//2, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_class, activation='softmax'))
    return model
Example #9
 def _make_model(self):
     step = 0
     print('******************************************', step)
     step += 1
     model_to_make = Sequential()
     print('******************************************', step)
     step += 1
     model_to_make.add(
         Conv2D(32, (5, 5),
                kernel_initializer=glorot_normal(),
                bias_initializer='zeros',
                strides=(3, 3),
                data_format='channels_first',
                input_shape=(3, 23, 23)))
     print(model_to_make.input_shape)
     print(model_to_make.output)
     print('******************************************', step)
     step += 1
     model_to_make.add(Activation('relu'))
     print(model_to_make.output)
     print('******************************************', step)
     step += 1
     model_to_make.add(
         Conv2D(filters=32,
                kernel_size=(3, 3),
                strides=(2, 2),
                data_format='channels_first',
                input_shape=(32, 7, 7)))
     print(model_to_make.output)
     print('******************************************', step)
     step += 1
     model_to_make.add(Activation('relu'))
     print(model_to_make.output)
     print('******************************************', 'flattened')
     model_to_make.add(Flatten())
     print(model_to_make.output)
     model_to_make.add(Dense(units=2, input_dim=288))
     print('******************************************', step)
     step += 1
     print(model_to_make.output)
     model_to_make.add(Activation('softmax'))
     print('******************************************', step)
     step += 1
     print('model waiting to be compiled')
     self.model = model_to_make
     print('******************************************')
Example #10
def get_discriminator(input_size, G, G_input_size):
    """Returns the discriminator networks `D` and `DG`.
    `input_size` is the input size of `D` and `DG`.
    `G` is the generator model.
    
    Returns a tuple of 2 elements:
      * The first element is `D`, the discriminator/critic
      * The second element is `DG` -> D(G(z))"""

    xavier = initializers.glorot_normal()

    x = Input(shape=(input_size, ), name='input_x')
    a = Reshape((28, 28, 1))(x)
    a = Conv2D(16, (3, 3), padding='same', kernel_initializer=xavier)(a)
    a = Flatten()(a)
    a = Dense(256, kernel_initializer=xavier)(a)  # chain from the flattened conv features
    a = LeakyReLU()(a)
    a = Dropout(0.5)(a)
    a = Dense(256, kernel_initializer=xavier)(a)
    a = LeakyReLU()(a)
    a = Dropout(0.5)(a)

    # creates an output to determine if an input is fake
    is_fake = Dense(1, activation='linear', name='output_is_fake')(a)

    # add the "fork" to classify
    classes = Dense(10, activation='softmax', name='D_classes')(a)

    # creates a D model that receives a real example as input
    D = Model(inputs=[x], outputs=[is_fake, classes], name='D')

    # creates another model that uses G as input
    z = Input(shape=(G_input_size, ), name='D_input_z')
    is_fake, classes = D(G(inputs=[z]))
    DG = Model(inputs=[z], outputs=[is_fake, classes], name='DG')

    # D shouldn't be trained during the generator's training phase
    DG.get_layer('D').trainable = False
    DG.compile(optimizer=Nadam(lr=0.0002),
               loss=[mean, 'categorical_crossentropy'])

    D.trainable = True
    D.compile(optimizer=Nadam(lr=0.0002),
              loss=[mean, 'categorical_crossentropy'])

    return D, DG
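The `mean` loss used in both compile calls is not defined in the snippet. In Keras GAN code of this style it is usually a thin wrapper that averages label-weighted predictions, so the +1/-1 labels carry the sign for real versus generated samples; a plausible definition (an assumption, not confirmed by the source):

from keras import backend as K

def mean(y_true, y_pred):
    # average of label-weighted critic outputs
    return K.mean(y_true * y_pred)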
Example #11
def get_generator(input_size, output_size):
    """Returns the generator model `G`.
    `input_size` and `output_size` are the size of the input vectors z for `G` and the output of `G`.
    """
    xavier = initializers.glorot_normal()

    z = Input(shape=(input_size, ), name='input_z')
    a = Dense(256, kernel_initializer=xavier)(z)
    a = LeakyReLU()(a)
    a = Reshape((16, 16, 1))(a)
    a = Conv2DTranspose(16, (3, 3), kernel_initializer=xavier)(a)
    a = LeakyReLU()(a)
    a = Flatten()(a)
    a = Dense(output_size, activation='sigmoid', kernel_initializer=xavier)(a)

    G = Model(inputs=[z], outputs=[a], name='G')
    return G
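A minimal wiring sketch for the two builders (sizes are illustrative; 784 = 28*28 matches the Reshape inside the discriminator above):

G = get_generator(input_size=100, output_size=784)
D, DG = get_discriminator(input_size=784, G=G, G_input_size=100)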
Example #12
def jsbae_0318_C2F1(input_shape, n_class):
    #jsbae_0314_clean_clean ## BEST MODEL UNTIL NOW
    cnn_init = initializers.glorot_normal()
    model = Sequential()
    model.add(Conv2D(128, kernel_size=5, strides=(1,1),
        activation='relu', input_shape=input_shape,
        kernel_initializer=cnn_init, padding='valid'))
    model.add(MaxPooling2D(pool_size=(1, 3), strides=(1, 3)))
    #model.add(Conv2D(128, (5,5), activation='relu'))
    model.add(Conv2D(256, (5,5), activation='relu', kernel_initializer=cnn_init, padding='valid'))
    #model.add(Conv2D(256, (5,5), activation='relu',kernel_initializer=cnn_init, padding='valid'))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    #model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_class, activation='softmax'))
    return model
Example #13
def train_model():
    # some declared variables
    randomSeed = 42
    networkInitialize = glorot_normal()
    inputImageShape = (224, 224, 3)
    epoch = 200
    batchSize = 32
    num_of_output_classes = 2
    random.seed(randomSeed)
    learningRate = 0.01

    trainX, testX, trainY, testY = load_data()
    # augmentation process
    augmentation = ImageDataGenerator(rotation_range=30,
                                      width_shift_range=0.1,
                                      height_shift_range=0.1,
                                      shear_range=0.2,
                                      zoom_range=0.2,
                                      horizontal_flip=True,
                                      fill_mode="nearest")

    checkpoint = ModelCheckpoint(
        'models\\model-{epoch:03d}-{acc:03f}-{val_acc:03f}.h5',
        verbose=1,
        monitor='val_acc',
        save_best_only=True,
        mode='auto')
    csv_logger = CSVLogger('report\\log_' + str(learningRate) + '.csv',
                           append=False,
                           separator=';')
    # training
    # compile the model
    model = cnn_model_structure(input_shape=inputImageShape,
                                num_classes=num_of_output_classes)
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])
    # print(model.summary())
    history = model.fit_generator(augmentation.flow(trainX,
                                                    trainY,
                                                    batch_size=batchSize),
                                  validation_data=(testX, testY),
                                  steps_per_epoch=len(trainX) // batchSize,
                                  epochs=epoch,
                                  callbacks=[csv_logger, checkpoint])
Example #14
def training_data(tmpdir_factory):
    import h5py
    import numpy as np
    from keras.layers import Dense
    from keras.models import Sequential
    from keras.optimizers import SGD
    from keras.initializers import glorot_normal, normal

    from deepreplay.datasets.parabola import load_data
    from deepreplay.callbacks import ReplayData

    filename = str(tmpdir_factory.mktemp('data').join('training.h5'))

    X, y = load_data(xlim=(-1, 1), n_points=1000, shuffle=True, seed=13)

    sgd = SGD(lr=0.05)

    glorot_initializer = glorot_normal(seed=42)
    normal_initializer = normal(seed=42)

    replaydata = ReplayData(X,
                            y,
                            filename=filename,
                            group_name='part1_activation_functions')

    np.random.seed(13)
    model = Sequential()
    model.add(
        Dense(input_dim=2,
              units=2,
              kernel_initializer=glorot_initializer,
              activation='sigmoid',
              name='hidden'))

    model.add(
        Dense(units=1,
              kernel_initializer=normal_initializer,
              activation='sigmoid',
              name='output'))

    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['acc'])

    model.fit(X, y, epochs=20, batch_size=16, callbacks=[replaydata])

    training_data = h5py.File(filename, 'r')
    return training_data['part1_activation_functions']
Example #16
def agent_init():

    optimizer_map = {
        'Adam': Adam(lr=a_globs.ALPHA),
        'RMSprop': RMSprop(lr=a_globs.ALPHA),
        'Adagrad': Adagrad(lr=a_globs.ALPHA),
        'SGD': SGD(lr=a_globs.ALPHA)
    }
    initializer_map = {
        'random': random_uniform(),
        'glorot': glorot_normal(),
        'he': he_normal()
    }

    a_globs.cur_epsilon = a_globs.EPSILON

    #The main buffer contains all of the sub buffers used to store different types of states, to support biased sampling
    a_globs.generic_buffer = []
    a_globs.buffer_container = [a_globs.generic_buffer]

    #Initialize the neural network
    a_globs.model = Sequential()
    init_weights = initializer_map[a_globs.INIT]

    a_globs.model.add(
        Dense(a_globs.NUM_NERONS_LAYER_1,
              activation='relu',
              kernel_initializer=init_weights,
              input_shape=(a_globs.FEATURE_VECTOR_SIZE, )))
    a_globs.model.add(
        Dense(a_globs.NUM_NERONS_LAYER_2,
              activation='relu',
              kernel_initializer=init_weights))
    a_globs.model.add(
        Dense(a_globs.NUM_ACTIONS,
              activation='linear',
              kernel_initializer=init_weights))

    a_globs.model.compile(loss='mse',
                          optimizer=optimizer_map[a_globs.OPTIMIZER])
    summarize_model(a_globs.model, a_globs.AGENT)

    #Create the target network
    a_globs.target_network = clone_model(a_globs.model)
    a_globs.target_network.set_weights(a_globs.model.get_weights())
Example #17
def built_and_compile(params, num_classes):
    layers = [
        Conv1D(params['conv1'],
               kernel_size=params['kernel_size1'],
               activation=params['activation1'],
               input_shape=(params['data_dim'], 1),
               use_bias=False,
               kernel_initializer=glorot_normal(seed=7)),
        BatchNormalization(),
        MaxPooling1D(params['pool1']),
        Dropout(rate=params['drop_rate1']),
        Conv1D(params['conv2'],
               kernel_size=params['kernel_size2'],
               activation=params['activation2'],
               use_bias=False,
               kernel_initializer=glorot_normal(seed=7)),
        BatchNormalization(),
        MaxPooling1D(params['pool2']),
        Dropout(rate=params['drop_rate2']),
        Conv1D(params['conv3'],
               kernel_size=params['kernel_size3'],
               activation=params['activation3'],
               use_bias=False,
               kernel_initializer=glorot_normal(seed=7)),
        BatchNormalization(),
        MaxPooling1D(params['pool3']),
        Dropout(rate=params['drop_rate3']),
        Conv1D(params['conv4'],
               kernel_size=params['kernel_size4'],
               activation=params['activation4'],
               use_bias=False,
               kernel_initializer=glorot_normal(seed=7)),
        BatchNormalization(),
        MaxPooling1D(params['pool4']),
        GlobalAveragePooling1D(),
        Dense(params['dense1'],
              activation=params['dense1_act'],
              use_bias=False,
              kernel_initializer=glorot_normal(seed=7)),
        BatchNormalization(),
        Dropout(rate=params['drop_rate4']),
        Dense(num_classes,
              activation='softmax',
              kernel_initializer=glorot_normal(seed=7))
    ]

    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy',
                  optimizer=params['optimizer'],
                  metrics=['accuracy'])

    return model
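A hypothetical params dict showing every key built_and_compile reads (all values illustrative):

params = {
    'data_dim': 300, 'optimizer': 'adam',
    'conv1': 32, 'kernel_size1': 7, 'activation1': 'relu', 'pool1': 2, 'drop_rate1': 0.2,
    'conv2': 64, 'kernel_size2': 5, 'activation2': 'relu', 'pool2': 2, 'drop_rate2': 0.2,
    'conv3': 128, 'kernel_size3': 3, 'activation3': 'relu', 'pool3': 2, 'drop_rate3': 0.3,
    'conv4': 256, 'kernel_size4': 3, 'activation4': 'relu', 'pool4': 2,
    'dense1': 128, 'dense1_act': 'relu', 'drop_rate4': 0.4,
}
model = built_and_compile(params, num_classes=10)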
Example #18
    def _get_init(self):
        """ -------------------------------------------------------------------------------------------------
        Return an Initializer according to the object attribute

        return:         [keras.initializers.Initializer]
        ------------------------------------------------------------------------------------------------- """
        if self.k_initializer == 'RUNIF':
            return initializers.RandomUniform(minval=-0.05,
                                              maxval=0.05,
                                              seed=cnfg['seed'])

        if self.k_initializer == 'GLOROT':
            return initializers.glorot_normal(seed=cnfg['seed'])

        if self.k_initializer == 'HE':
            return initializers.he_normal(seed=cnfg['seed'])

        ms.print_err("Initializer {} not valid".format(self.k_initializer))
Example #19
    def __init__(self, sess, dim_action, dim_state, n_plan_data,
                 n_traj, time_steps,
                 save_folder,
                 key_configs=None, x_scaler=None, c_scaler=None):
        self.initializer = initializers.glorot_normal()
        self.sess = sess
        self.key_configs = key_configs
        self.save_folder = save_folder
        self.s_scaler = x_scaler

        self.n_traj = n_traj
        self.time_steps = time_steps
        self.n_plan_data = n_plan_data

        self.noise_term_var = 0.25  # y = x + L*z with z ~ N(0, 1) gives y ~ N(x, L^2)
        self.setup_data_dimensions(dim_action, dim_state)
        self.setup_inputs()
        self.create_policy()
Example #20
    def create_sharedAutoRegLSTMmodel(self):
        inputs = Input(shape=(2 * self.maxlen, 1))

        mid_lstm = Bidirectional(
            LSTM(128,
                 batch_input_shape=(None, 2 * self.maxlen, 1),
                 kernel_initializer=glorot_normal(seed=20181020),
                 recurrent_initializer=orthogonal(gain=1.0, seed=20181020),
                 dropout=0.01,
                 recurrent_dropout=0.01,
                 return_sequences=True))(inputs)

        mid_dense = Dense(256, activation='relu')(mid_lstm)
        predictions = Dense(self.maxlen, activation='sigmoid')(mid_dense)

        shared_layers = Model(inputs,
                              predictions,
                              name="shared_AutoRegLSTMlayers")
        return shared_layers
Example #21
def get_model(embed_weights):
    input_layer = Input(shape=(MAX_LEN, ), name='input')
    # 1. embedding layer
    # get embedding weights
    print('load pre-trained embedding weights ......')
    input_dim = embed_weights.shape[0]
    output_dim = embed_weights.shape[1]
    x = Embedding(input_dim=input_dim,
                  output_dim=output_dim,
                  weights=[embed_weights],
                  trainable=False,
                  name='embedding')(input_layer)
    # clean up
    del embed_weights, input_dim, output_dim
    gc.collect()
    # 2. dropout
    x = SpatialDropout1D(rate=SPATIAL_DROPOUT)(x)
    # 3. bidirectional lstm
    x = Bidirectional(layer=CuDNNLSTM(
        RNN_UNITS,
        return_sequences=True,
        kernel_initializer=glorot_normal(seed=1029),
        recurrent_initializer=orthogonal(gain=1.0, seed=1029)),
                      name='bidirectional_lstm')(x)
    # 4. capsule layer
    capsul = Capsule(num_capsule=10,
                     dim_capsule=10,
                     routings=4,
                     share_weights=True)(x)  # noqa
    capsul = Flatten()(capsul)
    capsul = DropConnect(Dense(32, activation="relu"), prob=0.01)(capsul)

    # 5. attention layer
    atten = Attention(step_dim=MAX_LEN, name='attention')(x)
    atten = DropConnect(Dense(16, activation="relu"), prob=0.05)(atten)
    x = Concatenate(axis=-1)([capsul, atten])

    # 6. output (sigmoid)
    output_layer = Dense(units=1, activation='sigmoid', name='output')(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    # compile model
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
Example #22
    def discriminator(self):
        if self.D:
            return self.D

        # kern_init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)
        kern_init = initializers.glorot_normal()

        input_shape = (self.img_rows, self.img_cols, self.channel)
        input_img = Input(shape=input_shape, name='Input_Image')

        x = Conv2D(16, 5, strides=2, input_shape=input_shape, padding='same', kernel_initializer=kern_init)(input_img)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv2D(32, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv2D(64, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv2D(128, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv2D(256, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = Conv2D(512, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

        # Out: 1-dim probability
        x = Flatten()(x)
        x = Dense(1, activation='sigmoid')(x)


        self.D = Model(inputs=input_img, outputs=x, name='Discriminator')

        self.D.summary()
        return self.D
Example #23
    def __init__(self, sess, dim_data, dim_context, dim_konf, save_folder):
        self.opt_G = Adam(lr=1e-4, beta_1=0.5)
        # self.opt_D = Adam(lr=1e-3, beta_1=0.5)
        self.opt_D = Adadelta()

        self.initializer = initializers.glorot_normal()
        self.sess = sess
        self.dim_data = dim_data
        self.dim_context = dim_context
        self.dim_k = dim_konf
        self.n_key_confs = dim_context[0]

        self.x_input = Input(shape=(dim_data, ), name='x', dtype='float32')
        self.w_input = Input(shape=dim_context, name='w', dtype='float32')
        self.k_input = Input(shape=dim_konf, name='k', dtype='float32')

        self.disc = self.createDisc()
        self.disc.summary()
        self.save_folder = save_folder
Example #24
def ref_rnn(input_shape, n_class, model_size_info):
    '''
    model_size_info[-1]: 0 for LSTM, 1 for GRU
    '''
    cprint('**** ref_rnn ****', 'red')
    lstm1 = model_size_info[0]
    type_choice = model_size_info[-1]
    init = initializers.glorot_normal()
    # model
    model = Sequential()
    model.add(
        layers.Reshape((input_shape[0], input_shape[1]),
                       input_shape=input_shape))
    if type_choice == 0: model.add(layers.LSTM(lstm1))
    elif type_choice == 1: model.add(layers.GRU(lstm1))
    else: raise ValueError('wrong type name')

    if len(model_size_info) == 3:
        lstm1, dnn1, type_choice = model_size_info
        model.add(
            Dense(dnn1,
                  activation='relu',
                  kernel_initializer=init,
                  bias_initializer='zeros'))
        model.add(
            Dense(n_class,
                  activation='softmax',
                  kernel_initializer=init,
                  bias_initializer='zeros'))
    elif len(model_size_info) == 2:
        lstm1, type_choice = model_size_info
        model.add(
            Dense(n_class,
                  activation='softmax',
                  kernel_initializer=init,
                  bias_initializer='zeros'))
    else:
        raise ValueError('model_size_info must have length 2 or 3.')
    return model
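Concretely, model_size_info is either [rnn_units, dense_units, cell_type] or [rnn_units, cell_type]; a hypothetical call (sizes illustrative):

# LSTM(118) -> Dense(100, relu) -> Dense(n_class, softmax)
model = ref_rnn(input_shape=(98, 40, 1), n_class=12,
                model_size_info=[118, 100, 0])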
Example #25
    def generator(self):

        if self.G:
            return self.G

        # kern_init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)
        kern_init = initializers.glorot_normal()

        input_shape = (self.noise_dim,)
        input_noise = Input(shape=input_shape, name='noise')

        dim = 7 # cifar10 & celebA      
        depth = 512

        x = Dense(dim * dim * depth, kernel_initializer=kern_init)(input_noise)
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)
        x = Reshape((dim, dim, depth))(x)

        x = Conv2DTranspose(depth // 2, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = Activation('selu')(x)

        x = Conv2DTranspose(depth // 4, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = Activation('selu')(x)

        x = Conv2DTranspose(depth // 8, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = Activation('selu')(x)

        x = Conv2DTranspose(depth // 16, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        # x = BatchNormalization()(x)
        x = Activation('selu')(x)

        x = Conv2DTranspose(self.channel, 5, strides=2, padding='same', kernel_initializer=kern_init)(x)
        x = Activation('tanh')(x)

        self.G = Model(inputs=input_noise, outputs=x, name='Generator')

        self.G.summary()
        return self.G
Example #26
def get_model(embed_weights):
    input_layer = Input(shape=(MAX_LEN, ), name='input')
    # 1. embedding layer
    # get embedding weights
    print('load pre-trained embedding weights ......')
    input_dim = embed_weights.shape[0]
    output_dim = embed_weights.shape[1]
    x = Embedding(input_dim=input_dim,
                  output_dim=output_dim,
                  weights=[embed_weights],
                  trainable=False,
                  name='embedding')(input_layer)
    # clean up
    del embed_weights, input_dim, output_dim
    gc.collect()
    # 2. dropout
    x = SpatialDropout1D(rate=SPATIAL_DROPOUT)(x)
    # 3. bidirectional lstm
    x = Bidirectional(layer=CuDNNLSTM(
        RNN_UNITS,
        return_sequences=True,
        kernel_initializer=glorot_normal(seed=1029),
        recurrent_initializer=orthogonal(gain=1.0, seed=1029)),
                      name='bidirectional_lstm')(x)
    # 4. capsule layer
    x = Capsule(num_capsule=10,
                dim_capsule=10,
                routings=4,
                share_weights=True,
                name='capsule')(x)
    x = Flatten(name='flatten')(x)
    # # 5. dense with dropConnect
    # x = DropConnect(
    #     Dense(DENSE_UNITS, activation="relu"),
    #     prob=0.05,
    #     name='dropConnect_dense')(x)
    # 6. output (sigmoid)
    output_layer = Dense(units=1, activation='sigmoid', name='output')(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    # compile model
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
Example #27
def get_discriminator(input_size, G, G_input_size):
    """Returns the discriminator networks `D` and `DG`.
    `input_size` is the input size of `D` and `DG`.
    `G` is the generator model.
    
    Returns a tuple of 2 elements:
      * The first element is `D`, the discriminator/critic
      * The second element is `DG` -> D(G(z))"""
    
    xavier = initializers.glorot_normal()

    x = Input(shape=(input_size,), name='input_x')
    a = Dense(256, kernel_initializer=xavier)(x)
    a = LeakyReLU()(a)
    a = Dropout(0.5)(a)
    a = Dense(256, kernel_initializer=xavier)(a)
    a = LeakyReLU()(a)
    a = Dropout(0.5)(a)
    a = Dense(16, kernel_initializer=xavier)(a)
    a = LeakyReLU()(a)
    a = Dropout(0.5)(a)

    # creates an output to determine if an input is fake
    is_fake = Dense(1, activation='linear', name='output_is_fake')(a)

    # creates a D model that receives a real example as input
    D = Model(inputs=[x], outputs=[is_fake], name='D')

    # creates another model that uses G as input
    z = Input(shape=(G_input_size,), name='D_input_z')
    is_fake = D(G(inputs=[z]))
    DG = Model(inputs=[z], outputs=[is_fake], name='DG')

    # D shouldn't be trained during the generator's training phase
    DG.get_layer('D').trainable = False
    DG.compile(optimizer=RMSprop(lr=1e-3), loss=[mean])

    D.trainable = True
    D.compile(optimizer=RMSprop(lr=1e-3), loss=[wasserstein])

    return D, DG
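`mean` and `wasserstein` are again assumed helpers; in WGAN-style Keras code both typically reduce to the mean of label-weighted critic outputs, with the sign of the objective carried by the +1/-1 labels in y_true (an assumption, not from the source):

from keras import backend as K

def wasserstein(y_true, y_pred):
    # WGAN critic loss: y_true is +1 for real and -1 for generated samples
    return K.mean(y_true * y_pred)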
Example #28
def ref_cnn(input_shape, n_class, model_size_info):
    '''
    model_size_info: CNN1(channel, kernel, stride (time-freq)) + CNN2 + L + FC
    '''
    # model size
    CNN1 = model_size_info[:5]
    CNN2 = model_size_info[5:10]
    L_size = model_size_info[-2]
    FC_size = model_size_info[-1]
    # start
    cnn_init = initializers.glorot_normal()
    model = Sequential()
    model.add(
        Conv2D(CNN1[0],
               kernel_size=(CNN1[1], CNN1[2]),
               strides=(CNN1[3], CNN1[4]),
               activation='relu',
               input_shape=input_shape,
               kernel_initializer=cnn_init,
               padding='valid'))
    #model.add(layers.BatchNormalization())
    #model.add(Dropout(0.5))
    model.add(MaxPooling2D(pool_size=(1, 2), strides=(1, 2)))
    model.add(
        Conv2D(CNN2[0],
               kernel_size=(CNN2[1], CNN2[2]),
               strides=(CNN2[3], CNN2[4]),
               activation='relu',
               kernel_initializer=cnn_init,
               padding='valid'))
    #model.add(layers.BatchNormalization())
    #model.add(Dropout(0.5))
    # batch norm dropout
    #model.add(Conv2D(CNNChannel, (CNNkernel,CNNkernel), activation='relu',kernel_initializer=cnn_init, padding='valid'))
    # batch norm dropout
    model.add(Flatten())
    model.add(Dense(L_size, activation='relu'))
    model.add(Dense(FC_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_class, activation='softmax'))
    return model
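A hypothetical call matching the documented layout, CNN1 (5 values) + CNN2 (5 values) + L + FC (sizes illustrative):

# [ch1, k1_t, k1_f, s1_t, s1_f,  ch2, k2_t, k2_f, s2_t, s2_f,  L, FC]
model = ref_cnn(input_shape=(98, 40, 1), n_class=12,
                model_size_info=[64, 20, 8, 1, 1, 48, 10, 4, 1, 1, 128, 64])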
Example #29
def training_data(tmpdir_factory):
    import h5py
    from keras.layers import Dense
    from keras.models import Sequential
    from keras.optimizers import SGD
    from keras.initializers import glorot_normal, normal

    from deepreplay.datasets.parabola import load_data
    from deepreplay.callbacks import ReplayData

    filename = str(tmpdir_factory.mktemp('data').join('training.h5'))

    X, y = load_data(xlim=(-1, 1), n_points=1000, shuffle=True, seed=13)

    sgd = SGD(lr=0.05)

    glorot_initializer = glorot_normal(seed=42)
    normal_initializer = normal(seed=42)

    replaydata = ReplayData(X, y, filename=filename, group_name='part1_activation_functions')

    model = Sequential()
    model.add(Dense(input_dim=2,
                    units=2,
                    kernel_initializer=glorot_initializer,
                    activation='sigmoid',
                    name='hidden'))

    model.add(Dense(units=1,
                    kernel_initializer=normal_initializer,
                    activation='sigmoid',
                    name='output'))

    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['acc'])

    model.fit(X, y, epochs=20, batch_size=16, callbacks=[replaydata])

    training_data = h5py.File(filename, 'r')
    return training_data['part1_activation_functions']
Example #30
def build_fc_model(img_shape):
    initializer = glorot_normal()
    x0 = Input(img_shape, name='Input')

    raw_x = x0
    flattened_raw_x = Flatten()(raw_x)
    fc0 = Dense(num_fc_0,
                activation='relu',
                name='fully_informed_nodes',
                kernel_initializer=initializer,
                kernel_regularizer=l2(.001))(flattened_raw_x)

    fc1 = Dense(num_fc_1,
                activation='relu',
                name='dense_encoding',
                kernel_initializer=initializer,
                kernel_regularizer=l2(.001))(fc0)
    y = Dense(1, activation='sigmoid', name='output')(fc1)
    embedding_model = Model(inputs=x0, outputs=fc1)
    model = Model(inputs=x0, outputs=y)
    return model, embedding_model
Example #31
def build_my_model(m, n):
    inp = Input(shape=(m, ))
    # Assumption: the snippet omits the embedding step; without a 3-D tensor,
    # SpatialDropout1D and CuDNNLSTM below cannot run. Here n is taken as the
    # vocabulary size and 300 as an illustrative embedding width.
    x = Embedding(n, 300)(inp)
    x = SpatialDropout1D(rate=0.24)(x)
    x = Bidirectional(CuDNNLSTM(80,
                                return_sequences=True,
                                kernel_initializer=glorot_normal(seed=1029),
                                recurrent_initializer=orthogonal(gain=1.0, seed=1029)))(x)

    x_1 = Attention(m)(x)
    x_1 = DropConnect(Dense(32, activation="relu"), prob=0.2)(x_1)

    x_2 = Capsule(num_capsule=10, dim_capsule=10, routings=4, share_weights=True)(x)
    x_2 = Flatten()(x_2)
    x_2 = DropConnect(Dense(32, activation="relu"), prob=0.2)(x_2)

    conc = concatenate([x_1, x_2])
    # conc = add([x_1, x_2])
    outp = Dense(1, activation="sigmoid")(conc)
    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=[f1])
    return model
Example #32
 def parse_nn_wout_json(self, modelfile):
     with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
         with CustomObjectScope({'GlorotNormal': glorot_normal()}):
             try:
                 model = models.load_model(modelfile)
             except Exception:
                 try:
                     model = loadmodel(modelfile)
                 except Exception as err:
                     print(
                         'We cannot load the model, make sure the keras file was saved in a supported version'
                     )
                     print(err)
     [nl, ni, no] = self.get_shape(model)
     [lys, lfs] = self.get_layers(model, nl)
     #lfs = self.fix_activations(lys,lfs)
     [lsize, n, nls] = self.get_neurons(model, nl)
     [W, b] = self.get_parameters(model, nl, nls)
     return self.save_nnmat_file(model, ni, no, nls, n, lsize, W, b, lys,
                                 lfs)
Example #33
def attention_capsule(maxlen, max_features, embed_size, embedding_matrix, num_classes):
    inp = Input(shape=(maxlen,))
    x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = SpatialDropout1D(rate=0.24)(x)
    x = Bidirectional(LSTM(80,
                           return_sequences=True,
                           kernel_initializer=glorot_normal(seed=1029),
                           recurrent_initializer=orthogonal(gain=1.0, seed=1029)))(x)

    x_1 = Attention(maxlen)(x)
    x_1 = DropConnect(Dense(32, activation="relu"), prob=0.2)(x_1)

    x_2 = Capsule(num_capsule=10, dim_capsule=10, routings=4, share_weights=True)(x)
    x_2 = Flatten()(x_2)
    x_2 = DropConnect(Dense(32, activation="relu"), prob=0.2)(x_2)
    conc = concatenate([x_1, x_2])
    # conc = add([x_1, x_2])
    outp = Dense(num_classes, activation="sigmoid")(conc)
    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    return model
Example #34
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.initializers import glorot_normal, normal

from deepreplay.callbacks import ReplayData
from deepreplay.replay import Replay
from deepreplay.plot import compose_animations, compose_plots

from sklearn.datasets import make_moons

import matplotlib.pyplot as plt

group_name = 'moons'

X, y = make_moons(n_samples=2000, random_state=27, noise=0.03)

sgd = SGD(lr=0.01)

glorot_initializer = glorot_normal(seed=42)
normal_initializer = normal(seed=42)

replaydata = ReplayData(X, y, filename='moons_dataset.h5', group_name=group_name)

model = Sequential()
model.add(Dense(input_dim=2,
                units=4,
                kernel_initializer=glorot_initializer,
                activation='tanh'))
model.add(Dense(units=2,
                kernel_initializer=glorot_initializer,
                activation='tanh',
                name='hidden'))

model.add(Dense(units=1,
                kernel_initializer=normal_initializer,
                activation='sigmoid',
                name='output'))
Example #35
def test_glorot_normal(tensor_shape):
    fan_in, fan_out = initializers._compute_fans(tensor_shape)
    std = np.sqrt(2. / (fan_in + fan_out))
    _runner(initializers.glorot_normal(), tensor_shape,
            target_mean=0., target_std=std)
Example #36
    def _make_model(self):
        if self.is_hgg:
            dropout_rate = 0.1
        else:
            dropout_rate = 0.5
        step = 0
        print('******************************************', step)
        step += 1
        model_to_make = Sequential()
        print('******************************************', step)
        step += 1
        model_to_make.add(Conv2D(64, (3, 3),
                                 kernel_initializer=glorot_normal(),
                                 bias_initializer='zeros',
                                 padding='same',
                                 data_format='channels_first',
                                 input_shape=(4, 33, 33)
                                 ))
        print(model_to_make.input_shape)
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(LeakyReLU(alpha=0.333))
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(Conv2D(filters=64,
                                 kernel_size=(3, 3),
                                 padding='same',
                                 data_format='channels_first',
                                 input_shape=(64, 33, 33)))
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(LeakyReLU(alpha=0.333))
        print(model_to_make.output)
        if self.is_hgg:
            model_to_make.add(Conv2D(filters=64,
                                     kernel_size=(3, 3),
                                     padding='same',
                                     data_format='channels_first',
                                     input_shape=(64, 33, 33)))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)

            model_to_make.add(LeakyReLU(alpha=0.333))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)

        model_to_make.add(MaxPool2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    data_format='channels_first',
                                    input_shape=(64, 33, 33)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(Conv2D(filters=128,
                                 kernel_size=(3, 3),
                                 padding='same',
                                 data_format='channels_first',
                                 input_shape=(64, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(Conv2D(filters=128,
                                 kernel_size=(3, 3),
                                 padding='same',
                                 data_format='channels_first',
                                 input_shape=(128, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        if self.is_hgg:
            model_to_make.add(Conv2D(filters=128,
                                     kernel_size=(3, 3),
                                     padding='same',
                                     data_format='channels_first',
                                     input_shape=(128, 16, 16)))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)
            model_to_make.add(LeakyReLU(alpha=0.333))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)
        model_to_make.add(MaxPool2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    data_format='channels_first',
                                    input_shape=(128, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        print('******************************************', 'flattened')
        model_to_make.add(Flatten())
        print(model_to_make.output)
        model_to_make.add(Dense(units=256, input_dim=6272))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dropout(dropout_rate))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dense(units=256, input_dim=256))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dropout(dropout_rate))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dense(units=5,
                                input_dim=256))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Activation('softmax'))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        self.model = model_to_make