Example #1
def generator_SN(latent_dim, image_shape, num_res_blocks, base_name):
    initializer = TruncatedNormal(mean=0, stddev=0.2, seed=42)
    in_x = Input(shape=(latent_dim,))

    h, w, c = image_shape

    x = Dense(64*8*h//8*w//8, activation="relu", name=base_name+"_dense")(in_x)
    x = Reshape((h//8, w//8, -1))(x)

    x = UpSampling2D((2, 2))(x)
    x = ConvSN2D(64*4, kernel_size=3, strides=1, padding='same', kernel_initializer=initializer,
                        name=base_name + "_conv1")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn1")(x, training=1)
    x = Activation("relu")(x)

    # size//8→size//4→size//2→size
    x = UpSampling2D((2, 2))(x)
    x = ConvSN2D(64*2, kernel_size=3, strides=1, padding='same', kernel_initializer=initializer,
                        name=base_name + "_conv2")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn2")(x, training=1)
    x = Activation("relu")(x)

    x = SelfAttention(ch=64*2, name=base_name+"_sa")(x)

    x = UpSampling2D((2, 2))(x)
    # strides=2 halves the map again, so the four 2x upsamplings above
    # net an 8x enlargement overall (size//8 -> size)
    x = ConvSN2D(64*1, kernel_size=5, strides=2, padding='same', kernel_initializer=initializer,
                        name=base_name + "_conv3")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn3")(x, training=1)
    x = Activation("relu")(x)
    x = UpSampling2D((2, 2))(x)
    out = ConvSN2D(3, kernel_size=3, strides=1, padding='same', activation="tanh",
                          kernel_initializer=initializer, name=base_name + "_out")(x)
    model = Model(in_x, out, name=base_name)
    return model
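ConvSN2D and SelfAttention are custom spectral-normalization layers from the source repository, not stock Keras, and num_res_blocks is accepted but never used in this variant. A hypothetical call, only to illustrate the expected arguments:

# hypothetical usage; the argument values are placeholders
G = generator_SN(latent_dim=128, image_shape=(64, 64, 3),
                 num_res_blocks=0, base_name="gen")
G.summary()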
Example #2
def DC_CNN_Model(length):
    input = Input(shape=(length, 1))

    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data

    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])

    l9 = Activation('relu')(l8)

    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                 kernel_regularizer=l2(0.001))(l9)

    model = Model(inputs=input, outputs=l21)

    adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None,
                           decay=0.0, amsgrad=False)

    model.compile(loss='mae', optimizer=adam, metrics=['mse'])

    return model
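Both DC_CNN_Model examples depend on a DC_CNN_Block factory that is not shown. Below is a minimal sketch of such a block, assuming a WaveNet-style dilated causal convolution whose two outputs feed the residual path ("a") and the skip connections ("b"); the names and details are assumptions, not the original implementation:

def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    def f(input_):
        # dilated causal convolution over the time axis
        conv_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
                          dilation_rate=dilation, padding='causal',
                          activation='linear', use_bias=False,
                          kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                          kernel_regularizer=l2(l2_layer_reg))(input_)
        conv_out = Activation('selu')(conv_out)
        # 1x1 projection added back to the input -> residual output ("a")
        projected = Conv1D(1, 1, activation='linear', use_bias=False)(conv_out)
        residual_out = Add()([input_, projected])
        # 1x1 projection -> skip output ("b"), later summed across blocks
        skip_out = Conv1D(1, 1, activation='linear', use_bias=False)(conv_out)
        return residual_out, skip_out
    return f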
Example #3
def DC_CNN_Model(length):
    input = Input(shape=(length, 1))

    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data

    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])

    l9 = Activation('relu')(l8)

    l21 = Conv1D(1,
                 1,
                 activation='linear',
                 use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0,
                                                    stddev=0.05,
                                                    seed=42),
                 kernel_regularizer=l2(0.001))(l9)

    model = Model(inputs=input, outputs=l21)

    return model
Example #4
def conv2d(filters, kernel_size, strides=(1, 1), padding='same', bias_init=1, **kwargs):
    trunc = TruncatedNormal(mean=0.0, stddev=0.01)
    cnst = Constant(value=bias_init)
    return Conv2D(
        filters, kernel_size, strides=strides, padding=padding,
        activation='relu', kernel_initializer=trunc, bias_initializer=cnst, **kwargs
    )   
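A hedged usage sketch of the conv2d helper (Keras 2.x assumed; shapes are placeholders):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(227, 227, 3))
x = conv2d(filters=96, kernel_size=11, strides=(4, 4))(inp)  # ReLU conv, TruncatedNormal kernels, biases at 1
features = Model(inp, x)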
Example #5
def dense(units, activation='relu'):
    trunc = TruncatedNormal(mean=0.0, stddev=0.01)
    cnst = Constant(value=1)
    return Dense(
        units, activation=activation,
        kernel_initializer=trunc, bias_initializer=cnst,
    )
Example #6
def get_model_vgg():

    # image sizes after pre-processing
    global img_rows
    global img_cols
    global img_ch

    # define initializer
    k_i = TruncatedNormal(mean=0.0, stddev=0.01, seed=None)

    model = Sequential()
    model.add(
        Conv2D(64,
               3,
               strides=(1, 1),
               activation='relu',
               kernel_initializer=k_i,
               input_shape=(img_rows, img_cols, img_ch)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(
        Conv2D(128,
               3,
               strides=(1, 1),
               activation='relu',
               kernel_initializer=k_i))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(
        Conv2D(256,
               3,
               strides=(1, 1),
               activation='relu',
               kernel_initializer=k_i))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(
        Conv2D(512,
               3,
               strides=(1, 1),
               activation='relu',
               kernel_initializer=k_i))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4, activation='softmax'))

    # compiling the model
    #using default hyper parameters when creating new network
    Adam_Optimizer = Adam(lr=0.0005)
    model.compile(optimizer=Adam_Optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    #
    return model
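get_model_vgg() reads img_rows, img_cols and img_ch as module-level globals set by the pre-processing step. A minimal sketch of the assumed setup (values are placeholders):

img_rows, img_cols, img_ch = 224, 224, 3  # assumed image size after pre-processing
model = get_model_vgg()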
Example #7
def init():
    # assumes module-level constants and a global Sequential named `model`;
    # see the sketch after this example
    model.add(
        Conv2D(input_shape=INPUT_SHAPE,
               filters=CONV1_DEEP,
               kernel_size=(CONV1_SIZE, CONV1_SIZE),
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1),
               activation='relu',
               strides=1,
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))

    model.add(
        Conv2D(filters=CONV2_DEEP,
               kernel_size=(CONV2_SIZE, CONV2_SIZE),
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1),
               activation='relu',
               strides=1,
               padding='same'))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))

    model.add(Flatten())

    model.add(
        Dense(units=DENSE1_SIZE,
              activation="relu",
              use_bias=True,
              bias_initializer=Zeros(),
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.add(
        Dense(units=DENSE2_SIZE,
              activation="relu",
              use_bias=True,
              bias_initializer=Zeros(),
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.add(
        Dense(units=OUTPUT_NODE,
              activation="softmax",
              kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1)))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='sgd',
                  metrics=['accuracy'])
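init() mutates a module-level Sequential and reads several constants. A minimal sketch of the assumed surrounding definitions (all values below are placeholders, not from the source):

from keras.models import Sequential

INPUT_SHAPE = (28, 28, 1)          # assumed input size
CONV1_DEEP, CONV1_SIZE = 32, 5     # assumed conv1 filters / kernel
CONV2_DEEP, CONV2_SIZE = 64, 5     # assumed conv2 filters / kernel
DENSE1_SIZE, DENSE2_SIZE = 512, 128
OUTPUT_NODE = 10                   # assumed number of classes

model = Sequential()
init()  # builds and compiles the network in place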
Example #8
 def create_network(self, S, A, S_dim, layers, dropout, l2reg):
     h = concatenate([S, A])
     for l in layers:
         h = Dense(l,
                   activation="relu",
                   kernel_initializer=TruncatedNormal(),
                   bias_initializer=TruncatedNormal(),
                   kernel_regularizer=l2(l2reg),
                   )(h)
         h = Dropout(rate=dropout)(h)
     y_pred = Dense(S_dim,
                    activation='tanh',
                    kernel_initializer=TruncatedNormal(),
                    bias_initializer=TruncatedNormal(),
                    kernel_regularizer=l2(l2reg),
                    )(h)
     return y_pred
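create_network returns only the output tensor, so the caller must wrap it into a model. One plausible wiring from inside the owning class (state_dim, action_dim and the hyperparameters are assumptions):

S = Input(shape=(state_dim,))
A = Input(shape=(action_dim,))
y_pred = self.create_network(S, A, S_dim=state_dim,
                             layers=[64, 64], dropout=0.1, l2reg=1e-4)
dynamics = Model(inputs=[S, A], outputs=y_pred)
dynamics.compile(optimizer='adam', loss='mse')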
Example #9
    def _create_model(self):
        if self._is_regression:
            loss_func = 'mean_squared_error'
        else:
            loss_func = 'categorical_crossentropy'
        model = Sequential()
        model.add(
            Dense(self._out_dim1,
                  input_dim=self._input_dim,
                  kernel_initializer=TruncatedNormal(stddev=0.01)))

        model.add(Activation(self._activation))
        #model.add(Activation(PReLU()))

        #model.add(BatchNormalization())
        model.add(Dropout(self._dropout1))
        model.add(
            Dense(self._out_dim2,
                  kernel_initializer=TruncatedNormal(stddev=0.01)))

        model.add(Activation(self._activation))
        #model.add(Activation(PReLU()))

        #model.add(BatchNormalization())
        model.add(Dropout(self._dropout2))
        model.add(
            Dense(self._out_dim3,
                  kernel_initializer=TruncatedNormal(stddev=0.01)))

        model.add(Activation(self._activation))
        #model.add(Activation(PReLU()))

        #model.add(BatchNormalization())
        model.add(Dropout(self._dropout3))

        if self._is_regression:
            model.add(Dense(1))
        else:
            model.add(Dense(2))
            model.add(Activation('softmax'))

        model.compile(loss=loss_func,
                      optimizer='adadelta',
                      metrics=['accuracy'])
        return model
Example #10
def baseline_model():
    # create model
    model = Sequential()
    init = TruncatedNormal(stddev=0.01, seed=10)
    # config model: single-input model with 4 classes (categorical classification)
    model.add(
        Dense(units=50,
              input_dim=27,
              activation='relu',
              kernel_initializer=init))
    model.add(Dense(units=4, activation='softmax', kernel_initializer=init))
    # Compile model
    # optimizer
    adam = Adam(lr=0.007)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model
Example #11
 def create_model(self):
     model = Sequential()
     model.add(
         Dense(4,
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01),
               activation='tanh',
               input_shape=(self.INPUT_DIM, )))
     model.add(
         Dense(8,
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01),
               activation='tanh'))
     model.add(
         Dense(self.ACTIONS,
               kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01)))
     model.compile(loss='mean_absolute_error',
                   optimizer='rmsprop',
                   metrics=['accuracy'])
     self.model = model
Example #12
    def _fire_layer(self, name, input, s1x1, e1x1, e3x3, stdd=0.01):
        """
            wrapper for fire layer constructions

            :param name: name for layer
            :param input: previous layer
            :param s1x1: number of filters for squeezing
            :param e1x1: number of filter for expand 1x1
            :param e3x3: number of filter for expand 3x3
            :param stdd: standard deviation used for intialization
            :return: a keras fire layer
            """

        sq1x1 = Conv2D(name=name + '/squeeze1x1',
                       filters=s1x1,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       use_bias=True,
                       padding='SAME',
                       kernel_initializer=TruncatedNormal(stddev=stdd),
                       activation="relu",
                       kernel_regularizer=l2(self.config.WEIGHT_DECAY))(input)

        ex1x1 = Conv2D(name=name + '/expand1x1',
                       filters=e1x1,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       use_bias=True,
                       padding='SAME',
                       kernel_initializer=TruncatedNormal(stddev=stdd),
                       activation="relu",
                       kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

        ex3x3 = Conv2D(name=name + '/expand3x3',
                       filters=e3x3,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       use_bias=True,
                       padding='SAME',
                       kernel_initializer=TruncatedNormal(stddev=stdd),
                       activation="relu",
                       kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

        return concatenate([ex1x1, ex3x3], axis=3)
Example #13
    def build(width, height, depth, classes):
        # channel ordering (channels-first vs channels-last) can differ between toolkits
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        model.add(
            Conv2D(32, (5, 5),
                   padding="same",
                   input_shape=inputShape,
                   kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01)))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(64, (5, 5),
                   padding="same",
                   input_shape=inputShape,
                   kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01)))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # fully connected layers
        model.add(Flatten())
        model.add(
            Dense(512,
                  kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01)))
        model.add(Activation("relu"))
        # model.add(BatchNormalization())
        model.add(Dropout(0.6))

        # softmax classification
        model.add(
            Dense(classes,
                  kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.01)))
        model.add(Activation("softmax"))

        return model
Example #14
 def __init__(self,
              num_classes,
              num_seqs,
              time_steps,
              use_embedding,
              embedding_size,
              lstm_units,
              num_layers,
              learning_rate,
              dropout_rate,
              save_file=None):
     self.num_classes = num_classes
     inputs = Input(batch_shape=(num_seqs, time_steps))
     if use_embedding:
         x = Embedding(num_classes, embedding_size,
                       name='embedding')(inputs)
     else:
         # to_categorical works on NumPy arrays, not symbolic tensors;
         # one-hot encode inside the graph instead (uses Lambda and keras.backend as K)
         x = Lambda(lambda t: K.one_hot(K.cast(t, 'int32'), num_classes),
                    name='one_hot')(inputs)
     layer_num = 1
     while layer_num <= num_layers:
         x = LSTM(lstm_units,
                  return_sequences=True,
                  dropout=dropout_rate,
                  stateful=True,
                  name='lstm_' + str(layer_num))(x)
         x = Dropout(dropout_rate, name='dropout_' + str(layer_num))(x)
         layer_num += 1
     predictions = Dense(num_classes,
                         activation='softmax',
                         kernel_initializer=TruncatedNormal(stddev=0.1),
                         bias_initializer='zeros',
                         name='softmax')(x)
     self.model = Model(inputs=inputs, outputs=predictions)
     '''
     self.model = Sequential()
     if use_embedding:
         self.model.add(Embedding(num_classes, embedding_size, name='embedding'))
     layer_num = 1
     while layer_num <= num_layers:
         if layer_num == 1:
             if not use_embedding:
                 self.model.add(LSTM(lstm_units, return_sequences=True, input_dim=num_classes, name='lstm_'+str(layer_num)))
             else:
                 self.model.add(LSTM(lstm_units, return_sequences=True, name='lstm_'+str(layer_num)))
         self.model.add(Dropout(dropout_rate, name='dropout_'+str(layer_num)))
         layer_num += 1
     self.model.add(Dense(num_classes, activation='softmax', name='softmax'))
     '''
     self.model.compile(loss='categorical_crossentropy',
                        optimizer=optimizers.Adam(lr=learning_rate,
                                                  clipnorm=5),
                        metrics=['accuracy'])
     if save_file:
         #self.model = load_model(save_file)
         self.model.load_weights(save_file)
     print(self.model.summary())
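Because the LSTMs are stateful and the input is declared with batch_shape, every batch fed to this model must contain exactly num_seqs sequences of time_steps steps each. A hypothetical instantiation (the class name and all values are placeholders):

char_rnn = CharRNN(num_classes=86, num_seqs=32, time_steps=50,
                   use_embedding=True, embedding_size=128,
                   lstm_units=256, num_layers=2,
                   learning_rate=0.001, dropout_rate=0.5)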
Example #15
 def __init__(self,
              n_units,
              n_layers=1,
              lstm_bias=1,
              w_init=TruncatedNormal(stddev=0.05),
              recurrent_init=None,
              bidirectional=True,
              learn_initial_states=False):
     super().__init__("LSTM", n_units, n_layers, w_init, recurrent_init, bidirectional,
                      learn_initial_states, lstm_bias)
Example #16
 def build(self, input_shape):
     self.dim = input_shape[-1]
     self.full_shape = input_shape
     # random init (but overridden if self.init is given, which feeds starting values e.g. input mean)
     self.pixels = self.add_weight(name='pseudos',
                                   shape=(self.shape, ),
                                   initializer=TruncatedNormal(mean=0.5,
                                                               stddev=0.25),
                                   trainable=True)
     super(PseudoInput, self).build(input_shape)
Example #17
    def decoder(self, params):
        print('genetic_params:', params)
        NUM_NET_1 = 1  # for LR
        NUM_NET_2 = 8  # for the rest network params
        NUM_TIME = 3
        NUM_DELAY_TYPE = 3
        NUM_DELAY = 4

        # network_params
        BATCH_SIZE = [16, 32, 64, 128]
        SEQ_LEN = [16, 32, 64, 128]
        STATE_SIZE = [16, 32, 64, 128]
        LR = list(np.logspace(-3, -6, 16))
        DR = [0.99, 0.98, 0.97, 0.96]
        PKEEP = [0.9, 0.8, 0.7, 0.6]
        ACTIVATION = ["relu", "tanh", "sigmoid", "softsign"]
        INIT = [Zeros(), TruncatedNormal(), Orthogonal(), RandomUniform()]
        net_name = ['lr', 'batch_size', 'seq_len', 'state_size', 'dr', 'pkeep', 'optimizer', 'activation_f', 'initializer']
        network_params = {}
        network_params['lr'] = LR[BitArray(params[0: NUM_NET_1 * 4]).uint]
        for i in range(NUM_NET_2):
            name = net_name[i + 1]
            network_params[name] = BitArray(params[4 + i * 2: 4 + i * 2 + 2]).uint
        network_params['batch_size'] = BATCH_SIZE[network_params['batch_size']]
        network_params['seq_len'] = SEQ_LEN[network_params['seq_len']]
        network_params['state_size'] = STATE_SIZE[network_params['state_size']]
        network_params['dr'] = DR[network_params['dr']]
        network_params['pkeep'] = PKEEP[network_params['pkeep']]
        network_params['activation_f'] = ACTIVATION[network_params['activation_f']]
        network_params['initializer'] = INIT[network_params['initializer']]

        # timeseries_params
        timeseries_params = {}

        TIME_STEP_DAYS = [7, 14, 30, 60]
        TIME_STEP_WEEKS = [4, 8, 12, 24]
        TIME_STEP_MONTHS = [2, 3, 6, 9]
        TIME_STEP = [TIME_STEP_DAYS, TIME_STEP_WEEKS, TIME_STEP_MONTHS]
        step_name = ['time_series_step_days', 'time_series_step_weeks', 'time_series_step_months']
        for index in range(NUM_TIME):
            name = step_name[index]
            step = TIME_STEP[index]
            timeseries_params[name] = step[BitArray(params[20 + index * 2: 20 + index * 2 + 2]).uint]

        DELAY = [7, 14, 30, 60, 90, 120, 150, 180]
        delay_name_days = ['delay_google_days', 'delay_tweeter_days', 'delay_macro_days', 'delay_tweeter_re_days']
        delay_name_weeks = ['delay_google_weeks', 'delay_tweeter_weeks', 'delay_macro_weeks', 'delay_tweeter_re_weeks']
        delay_name_months = ['delay_google_months', 'delay_tweeter_months', 'delay_macro_months', 'delay_tweeter_re_months']
        delay_name = [delay_name_days, delay_name_weeks, delay_name_months]
        for type_idx in range(NUM_DELAY_TYPE):
            name_list = delay_name[type_idx]
            for index in range(NUM_DELAY):
                name = name_list[index]
                # note: this slice depends only on `index`, so all three delay
                # types decode the same bits
                timeseries_params[name] = DELAY[BitArray(params[26 + index * 3: 26 + index * 3 + 3]).uint]
        return network_params, timeseries_params
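The slice offsets above read bits 0 through 37, so any chromosome of at least 38 bits decodes cleanly. A hypothetical call with random bits, purely illustrative:

import random

chromosome = [random.randint(0, 1) for _ in range(38)]
network_params, timeseries_params = self.decoder(chromosome)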
Example #18
def build_model_bilstm(num_output,
                       input_shape=None,
                       learning_rate=1e-3,
                       num_layer_lstm=500):
    if input_shape is None:
        input_shape = [60, 75]

    K.clear_session()
    initializer = TruncatedNormal(stddev=0.1)

    # Building the model
    model = Sequential()

    # model.add(Conv2D)
    model.add(
        Dense(32,
              activation='relu',
              kernel_initializer=initializer,
              bias_initializer=initializer,
              input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(
        Bidirectional(LSTM(num_layer_lstm, return_sequences=False),
                      merge_mode='ave'))
    # model.add(Bidirectional(LSTM(50, return_sequences=False)))
    # model.add(GlobalMaxPool1D())
    model.add(Dropout(0.3))
    model.add(
        Dense(1024,
              activation='relu',
              kernel_initializer=initializer,
              bias_initializer=initializer))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    model.add(
        Dense(512,
              activation='relu',
              kernel_initializer=initializer,
              bias_initializer=initializer))
    model.add(BatchNormalization())
    model.add(Dropout(0.1))
    # model.add(Dense(128, activation='relu', kernel_initializer=initializer, bias_initializer=initializer))
    model.add(Dense(num_output, activation="softmax"))

    # Compiling the model
    optimizer = optimizers.RMSprop(lr=learning_rate,
                                   rho=0.9,
                                   epsilon=1e-6,
                                   decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Example #19
def generator(latent_dim, image_shape, num_res_blocks, base_name):
    initializer = TruncatedNormal(mean=0, stddev=0.2, seed=42)
    in_x = Input(shape=(latent_dim, ))

    h, w, c = image_shape

    x = Dense(64 * 8 * h // 8 * w // 8,
              activation="relu",
              name=base_name + "_dense")(in_x)
    x = Reshape((h // 8, w // 8, -1))(x)

    x = Conv2DTranspose(64 * 4,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        kernel_initializer=initializer,
                        name=base_name + "_deconv1")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5,
                           name=base_name + "_bn1")(x, training=1)

    for i in range(num_res_blocks):
        x = residual_block(x,
                           base_name=base_name,
                           block_num=i,
                           initializer=initializer,
                           num_channels=64 * 4)

    # size//8→size//4→size//2→size
    x = Conv2DTranspose(64 * 2,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        kernel_initializer=initializer,
                        name=base_name + "_deconv2")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5,
                           name=base_name + "_bn2")(x, training=1)
    x = Activation("relu")(x)
    x = Conv2DTranspose(64 * 1,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        kernel_initializer=initializer,
                        name=base_name + "_deconv3")(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5,
                           name=base_name + "_bn3")(x, training=1)
    x = Activation("relu")(x)
    out = Conv2DTranspose(3,
                          kernel_size=7,
                          strides=1,
                          padding='same',
                          activation="tanh",
                          kernel_initializer=initializer,
                          name=base_name + "_out")(x)
    model = Model(in_x, out, name=base_name)
    return model
Example #20
def fire_layer(name, input, s1x1, e1x1, e3x3, stdd=0.01, regularizer=None):
    """
    wrapper for fire layer constructions
    @param name: name for layer
    @param input: previous layer
    @param s1x1: number of filters for squeezing
    @param e1x1: number of filter for expand 1x1
    @param e3x3: number of filter for expand 3x3
    @param stdd: standard deviation used for intialization
    """

    sq1x1 = Conv2D(name=name + '/squeeze1x1',
                   filters=s1x1,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   use_bias=True,
                   padding='SAME',
                   kernel_initializer=TruncatedNormal(stddev=stdd),
                   activation='relu',
                   kernel_regularizer=regularizer)(input)

    ex1x1 = Conv2D(name=name + '/expand1x1',
                   filters=e1x1,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   use_bias=True,
                   padding='SAME',
                   kernel_initializer=TruncatedNormal(stddev=stdd),
                   activation='relu',
                   kernel_regularizer=regularizer)(sq1x1)

    ex3x3 = Conv2D(name=name + '/expand3x3',
                   filters=e3x3,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   use_bias=True,
                   padding='SAME',
                   kernel_initializer=TruncatedNormal(stddev=stdd),
                   activation='relu',
                   kernel_regularizer=regularizer)(sq1x1)

    return concatenate([ex1x1, ex3x3], axis=3)
Example #21
def MLP():
    inputs = Input(shape=(EMBEDDING_SHAPE))
    layer = Dense(32,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(inputs)
    layer = Dense(256,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(256,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(512,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(512,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(256,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(256,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    layer = Dropout(0.3)(layer)
    layer = Dense(32,
                  activation="relu",
                  kernel_initializer=TruncatedNormal(mean=0.0,
                                                     stddev=0.01))(layer)
    outputs = Dense(4,
                    activation="sigmoid",
                    kernel_initializer=TruncatedNormal(mean=0.0,
                                                       stddev=0.01))(layer)

    model = Model(inputs=inputs, outputs=outputs)
    return model
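MLP() returns an uncompiled model with four independent sigmoid outputs; a multi-label compile is one plausible pairing (an assumption, not from the source):

model = MLP()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])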
Example #22
def train():
    if path.exists('CaptchaSplit.h5'):
        model = load_model('CaptchaSplit.h5')
    else:
        model = Sequential()
        model.add(
            Conv2D(filters=32,
                   kernel_size=5,
                   activation='relu',
                   padding='same',
                   kernel_initializer=TruncatedNormal(stddev=0.1),
                   bias_initializer=Constant(0.1),
                   input_shape=(40, 60, 1)))
        model.add(MaxPool2D(pool_size=2, padding='same'))
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation='relu',
                   padding='same',
                   kernel_initializer=TruncatedNormal(stddev=0.1),
                   bias_initializer=Constant(0.1)))
        model.add(MaxPool2D(pool_size=2, padding='same'))
        model.add(Flatten())
        model.add(Dense(units=1024, activation='relu'))
        model.add(Dropout(rate=0.5))
        model.add(Dense(units=10, activation='softmax'))

        model.compile(loss=losses.categorical_crossentropy,
                      optimizer=optimizers.Adam(lr=1e-4),
                      metrics=[metrics.categorical_accuracy])

    with np.load(path.join('Dataset', 'dataset.npz')) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
        x_train = image_preprocess(x_train)
        x_test = image_preprocess(x_test)
        y_train = binarization(y_train)
        y_test = binarization(y_test)
        model.fit(x_train, y_train, epochs=5, batch_size=50, verbose=2)
        print(model.evaluate(x_test, y_test))
    model.save('CaptchaSplit.h5')
    return model
Example #23
def buildmodel(show_model=False):
    input1 = Input(shape=(rows*cols,2),dtype='float32')
#    x = Conv2D(16,(2,2),strides=(1,1),padding='same',
#               activation='relu',
#               kernel_initializer=TruncatedNormal(),
#               bias_initializer='zeros')(input1)
    flat1 = Flatten()(input1)
#    input2 = Input(shape=[2],dtype='float32',name='in2')
#    x = keras.layers.concatenate([flat1,input2])
    x = Dense(64,kernel_initializer=TruncatedNormal(),
              bias_initializer='zeros',activation='relu')(flat1)
    #x = Dropout(0.5)(x) large data set, no need for dropout
    x = Dense(32,kernel_initializer=TruncatedNormal(),
              bias_initializer='zeros',activation='relu')(x)
    x = Dense(ACTIONS,kernel_initializer=TruncatedNormal(),
                   bias_initializer='zeros')(x)
    model = Model(input1, x)
    model.compile(loss='mse',optimizer='adam')
    if show_model: print(model.summary())
    return model
Example #24
 def build(self, input_shape):
     self.scale = self.add_weight(name='scale',
                                  shape=(input_shape[1], ),
                                  initializer=TruncatedNormal(mean=1.0,
                                                              stddev=0.02),
                                  trainable=True)
     self.shift = self.add_weight(name='shift',
                                  shape=(input_shape[1], ),
                                  initializer=Constant(0.0),
                                  trainable=True)
     super(InstanceNormalization2D, self).build(input_shape)
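Only build() is shown; the weight shape (input_shape[1],) implies channels-first data. A plausible call() to pair with it, assuming keras.backend is imported as K (this is a guess at the missing class body, not the original):

def call(self, x):
    # normalize each channel over its spatial axes ((N, C, H, W) assumed)
    mean = K.mean(x, axis=[2, 3], keepdims=True)
    std = K.std(x, axis=[2, 3], keepdims=True)
    scale = K.reshape(self.scale, (1, -1, 1, 1))
    shift = K.reshape(self.shift, (1, -1, 1, 1))
    return scale * (x - mean) / (std + 1e-5) + shift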
Example #25
def Conv2DInstanceNorm(inputs, filters, kernel_size, strides=1, relu=True):
    weights_init = TruncatedNormal(stddev=WEIGHTS_INIT_STDEV, seed=1)
    conv = Conv2D(filters, (kernel_size, kernel_size),
                  strides=strides,
                  padding='same',
                  kernel_initializer=weights_init,
                  use_bias=False)(inputs)
    norm = InstanceNormalization(axis=3)(conv)
    if relu:
        norm = Activation('relu')(norm)
    return norm
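WEIGHTS_INIT_STDEV is a module-level constant in the source; 0.1 is a common choice in fast style-transfer code, but treat the values below as placeholders:

WEIGHTS_INIT_STDEV = 0.1  # assumed value
inputs = Input(shape=(256, 256, 3))
x = Conv2DInstanceNorm(inputs, filters=32, kernel_size=9)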
Example #26
def resnet50_rpn(base_model,
                 load_weights=False,
                 weight_regularizer=None,
                 bias_regularizer=None,
                 include_conv=False,
                 anchors_per_loc=DEFAULT_ANCHORS_PER_LOC):
    """
    Creates an rpn model on top of a passed in base model.
    :param base_model: Keras model returned by resnet50_base, containing only the first 4 blocks.
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
    regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
        regularization.
    :param include_conv: boolean for whether the conv4 output should be included in the model output.
    :param anchors_per_loc: number of anchors at each convolution position.
    :return: Keras model with the rpn layers on top of the base layers. Weights are initialized to Imagenet weights.
    """
    net = Conv2D(512, (3, 3),
                 padding='same',
                 activation='relu',
                 kernel_initializer='normal',
                 kernel_regularizer=weight_regularizer,
                 bias_regularizer=bias_regularizer,
                 name='rpn_conv1')(base_model.output)

    gaussian_initializer = TruncatedNormal(stddev=0.01)
    x_class = Conv2D(anchors_per_loc, (1, 1),
                     activation='sigmoid',
                     kernel_initializer=gaussian_initializer,
                     kernel_regularizer=weight_regularizer,
                     bias_regularizer=bias_regularizer,
                     name='rpn_out_cls')(net)
    x_regr = Conv2D(anchors_per_loc * 4, (1, 1),
                    activation='linear',
                    kernel_initializer=gaussian_initializer,
                    kernel_regularizer=weight_regularizer,
                    bias_regularizer=bias_regularizer,
                    name='rpn_out_bbreg')(net)

    outputs = [x_class, x_regr]
    if include_conv:
        outputs.append(base_model.output)

    rpn_model = Model(inputs=base_model.inputs, outputs=outputs)

    if load_weights:
        # weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        #                         WEIGHTS_PATH_NO_TOP,
        #                         cache_subdir='models',
        #                         md5_hash='a268eb855778b3df3c7506639542a6af')
        weights_path = "models/rpn_weights_{}_step1.h5".format("resnet50")
        rpn_model.load_weights(weights_path, by_name=True)
    return rpn_model
Example #27
 def create_actor_network(self, state_size, action_dim):
     logging.getLogger("learner").info("building actor")
     S = Input(shape=[state_size, ], name='actor_input')
     # HIDDEN1_UNITS=100, relu
     h0 = Dense(self.network_config['hlayer_1_size'], activation=self.network_config['hlayer_1_type'],
                name='actor_h0')(S)
     # HIDDEN2_UNITS=200, relu
     h1 = Dense(self.network_config['hlayer_2_size'], activation=self.network_config['hlayer_2_type'],
                name='actor_h1')(h0)
     A = Dense(action_dim, activation='tanh', kernel_initializer=TruncatedNormal(stddev=0.5), name='actor_A')(h1)
     model = Model(inputs=S, outputs=A)
     return model, model.trainable_weights, S
Example #28
def learningDL():
    plusData = pd.read_csv('plus.csv', sep=",", comment="#")

    #...split into x (explanatory variables) and y (target variable)
    x = plusData.values[:, 2:]
    y = plusData.values[:, 1]

    #...split x, y into training and test sets
    #......test_size: fraction of the data used for testing
    #......random_state: random seed for the split
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.5,
                                                        random_state=0)

    # standardization
    scaler = StandardScaler().fit(x_train)
    x_train_transformed = scaler.transform(x_train)
    x_test_transformed = scaler.transform(x_test)

    # load MLPClassifier, scikit-learn's fully connected neural network
    print("...training with MLPClassifier")
    classifier = MLPClassifier()
    classifier.fit(x_train_transformed, y_train)
    print(classifier.score(x_test_transformed, y_test))
    print("")

    # build a neural network with Keras
    print("Building a neural network with Keras")
    model = Sequential()
    model.add(Dense(10, activation='relu', input_dim=2))
    # binary_crossentropy expects probabilities, so the output needs a sigmoid
    model.add(Dense(1, activation='sigmoid',
                    kernel_initializer=TruncatedNormal(stddev=0.01)))
    print("network weights:", model.get_weights())
    print("")

    # training with Keras
    print("Training with Keras")
    # configure the loss function
    model.compile(
        loss='binary_crossentropy',  # use binary_crossentropy for two-class problems
        optimizer='sgd',  # optimization algorithm
        metrics=['accuracy'])  # report accuracy during training
    # run training
    model.fit(
        x_train_transformed,
        y_train,  # inputs and targets
        epochs=200,  # number of epochs
        batch_size=64)  # batch size
    print("")

    with open("KerasLearning.pickle", mode='wb') as fp:
        pickle.dump(model, fp)
Example #29
    def build_model(self):

        inputs = Input(shape=(self.input_dim, ))
        hidden = Dense(self.base_nodes * 1,
                       activation='relu',
                       kernel_initializer=TruncatedNormal(mean=0.0,
                                                          stddev=0.05,
                                                          seed=42),
                       bias_initializer=TruncatedNormal(mean=0.0,
                                                        stddev=0.05,
                                                        seed=42))(inputs)
        hidden = Dense(self.base_nodes * 2,
                       activation='relu',
                       kernel_initializer=TruncatedNormal(mean=0.0,
                                                          stddev=0.05,
                                                          seed=42),
                       bias_initializer=TruncatedNormal(mean=0.0,
                                                        stddev=0.05,
                                                        seed=42))(hidden)
        outputs = Dense(self.output_dim,
                        activation='linear',
                        kernel_initializer=TruncatedNormal(mean=0.0,
                                                           stddev=0.05,
                                                           seed=42),
                        bias_initializer=TruncatedNormal(mean=0.0,
                                                         stddev=0.05,
                                                         seed=42))(hidden)

        model = Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer=Adam(lr=self.lr), loss='mse')

        return model
Example #30
    def _create_model(self):
        """
        #builds the Keras model from config
        #return: squeezeDet in Keras
        """
        input_shape = (self.config.IMAGE_HEIGHT, self.config.IMAGE_WIDTH,
                       self.config.N_CHANNELS)
        input_layer = Input(shape=input_shape, name="input")

        self.base_mn2 = MobileNetV2(input_shape=input_shape,
                                    input_tensor=input_layer,
                                    include_top=False)
        trainable = False
        for l in self.base_mn2.layers:
            if l.name == self.config.BASE_TRAIN_START_LAYER: trainable = True
            l.trainable = trainable
        x = self.base_mn2(input_layer)
        if self.config.MODEL_UPSCALE != 'False':
            x = Conv2DTranspose(256, (3, 3),
                                strides=(2, 2),
                                padding='same',
                                name='deconv')(x)

        # note: Keras Dropout `rate` is the fraction dropped, despite the KEEP_PROB name
        dropout11 = Dropout(rate=self.config.KEEP_PROB, name='drop11')(x)

        #compute the number of output nodes from number of anchors, classes, confidence score and bounding box corners
        num_output = self.config.ANCHOR_PER_GRID * (self.config.CLASSES + 1 +
                                                    4)

        preds = Conv2D(name='conv12',
                       filters=num_output,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       activation=None,
                       padding="SAME",
                       use_bias=True,
                       kernel_initializer=TruncatedNormal(stddev=0.001),
                       kernel_regularizer=l2(
                           self.config.WEIGHT_DECAY))(dropout11)

        model = Model(inputs=input_layer, outputs=preds)
        model.summary()

        #reshape
        pred_reshaped = Reshape((self.config.ANCHORS, -1))(preds)

        #pad for loss function so y_pred and y_true have the same dimensions, wrap in lambda layer
        pred_padded = Lambda(self._pad)(pred_reshaped)

        model = Model(inputs=input_layer, outputs=pred_padded)

        return model