def build_model(self):
     self.model = Sequential()
     self.model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=self.input_shape,
                          kernel_initializer=VarianceScaling(seed=0)))
     self.model.add(Conv2D(32, (3, 3), activation='relu',
                          kernel_initializer=VarianceScaling(seed=0)))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(Dropout(0.25))
     self.model.add(Flatten())
     self.model.add(Dense(64, activation='relu',
                         kernel_initializer=VarianceScaling(seed=0),))
     self.model.add(Dropout(0.5))
     self.model.add(Dense(self.num_classes, activation='sigmoid'))
def build_model():
    model = Sequential()
    model.add(
        Dense(64,
              input_shape=(4, ),
              activation="elu",
              kernel_initializer=VarianceScaling()))
    model.add(Dense(64, activation="elu",
                    kernel_initializer=VarianceScaling()))
    model.add(
        Dense(4, activation="linear", kernel_initializer=VarianceScaling()))
    model.compile(loss="mse", optimizer=RMSprop(lr=0.001))
    model._make_predict_function()  # pre-build the predict function (old Keras private API) so predict() is safe to call from another thread
    return model
Example #3
File: generator.py Project: philou93/srgan
    def create_model(self):
        inputs = Input(shape=(None, None, 3))
        # Layer to extract features from the LR image
        x = Conv2D(128, kernel_size=9, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(inputs)
        x = Conv2D(128, kernel_size=9, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(x)
        x = Conv2D(64, kernel_size=9, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(x)
        x = BatchNormalization()(x)

        # Reconstruct the features for the SR output
        y = Conv2D(64, kernel_size=5, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(x)
        y = Conv2D(64, kernel_size=3, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(y)
        y = Conv2D(64, kernel_size=3, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(y)

        shortcut = Conv2D(64, 1, strides=1, activation="relu")(x)
        y = Add()([y, shortcut])
        y = BatchNormalization()(y)

        y = Conv2D(64, kernel_size=3, strides=1, padding="same", activation="relu",
                   kernel_initializer=VarianceScaling(scale=2))(y)

        # output_img will have 1 feature map if grayscale and 3 if color.
        output_img = Conv2D(3, kernel_size=5, strides=1, activation="relu", padding="same",
                            kernel_initializer=VarianceScaling(scale=2))(y)

        # fm = MaxPool2D(3, strides=1)(output_img)

        model = Model(inputs=[inputs], outputs=[output_img])  # outputs=[output_img, fm]

        return model
    def conv2d_bn(self,
                  x,
                  filter_size,
                  kernel_size,
                  padding_type,
                  activation_type,
                  strides=(1, 1)):
        weight = 5e-4
        x = Conv2D(filters=filter_size,
                   kernel_size=kernel_size,
                   strides=strides,
                   kernel_regularizer=l2(weight),
                   kernel_initializer=VarianceScaling(scale=2.0,
                                                      mode='fan_in',
                                                      distribution='normal',
                                                      seed=None),
                   padding=padding_type,
                   activation='linear')(x)

        if activation_type == 'LeakyRelu':
            x = LeakyReLU(alpha=0.3)(x)
        else:
            x = Activation(activation_type)(x)
        x = BatchNormalization(axis=-1)(x)
        return x
def model_init(opts):
    """Simple sequential image-to-image convolutional neural network"""

    init_fn = VarianceScaling(2.)

    model = Sequential()
    isFirst = True
    for ks, nk, a in zip(opts.kernelSizes, opts.numKernels,
                         opts.activations):

        if isFirst:
            model.add(
                layers.Conv2D(nk,
                              kernel_size=ks,
                              strides=opts.strides,
                              padding=opts.padding,
                              kernel_initializer=init_fn,
                              input_shape=opts.inputShape))
            isFirst = False
        else:
            model.add(
                layers.Conv2D(nk,
                              kernel_size=ks,
                              strides=opts.strides,
                              padding=opts.padding,
                              kernel_initializer=init_fn))

        if opts.includeInsNormLayer:
            model.add(InstanceNormalization(axis=opts.insNormAxis))

        model.add(layers.Activation(a))
        if opts.dropRate > 0.0:
            model.add(layers.Dropout(rate=opts.dropRate))
    return model
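
A minimal usage sketch for model_init; opts is just an attribute container here, and every field value below is illustrative rather than taken from the project.
from types import SimpleNamespace

# hypothetical options object; the field names match the attributes model_init reads
opts = SimpleNamespace(kernelSizes=[3, 3, 3],
                       numKernels=[32, 32, 1],
                       activations=['relu', 'relu', 'linear'],
                       strides=1,
                       padding='same',
                       inputShape=(128, 128, 1),
                       includeInsNormLayer=False,
                       insNormAxis=-1,
                       dropRate=0.0)
net = model_init(opts)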
Example #6
    def G_mapping(self):
        with K.name_scope('G_mapping'):
            latents_in = Input(batch_shape=(None, self.latent_size))
            inputs = [latents_in]
            if self.label_size > 0:
                labels_in = Input(batch_shape=(None, self.label_size))
                y = Dense(self.label_size,
                          use_bias=False)(labels_in)  # [batch, label_size]
                x = Concatenate(axis=1)([latents_in,
                                         y])  # [batch, latent_size+label_size]
                inputs.append(labels_in)
            else:
                x = latents_in
            with K.name_scope('Dense_mapping'):
                # per the StyleGAN paper, every G_mapping dense layer uses a learning-rate multiplier of 0.01 (applied here via SetLearningRate / mapping_lrmul)
                for i in range(self.mapping_layers):
                    x = SetLearningRate(Dense(
                        self.latent_size,
                        kernel_initializer=VarianceScaling(math.sqrt(2))),
                                        lamb=self.mapping_lrmul)(x)
                    x = LeakyReLU(0.2)(x)

            x = RepeatVector(self.model_num * 2)(
                x)  # [batch, dim]->[batch, layer*2, dim]

            # truncation trick
            # x = TruncationLayer(self.model_num * 2, self.latent_size, self.truncation_psi,
            #                     self.truncation_cutoff, self.dlatent_avg_beta)(x)

        return Model(inputs=inputs, outputs=x)
Example #7
def main():
    data = read_data()
    data.resize(len(data), 256 * 3)
    # temporary
    n_cluster = 5
    update_interval = 13
    pretrain_epochs = 30
    batch_size = 10
    init = VarianceScaling(scale=1. / 3.,
                           mode='fan_in',
                           distribution="uniform")
    pretrain_optimizer = Adam()
    dec = DEC(dims=[256 * 3, 500, 500, 2000, 10],
              n_cluster=n_cluster,
              init=init)

    dec.pretrain(x=data,
                 optimizer=pretrain_optimizer,
                 epochs=pretrain_epochs,
                 batch_size=batch_size)
    dec.model.summary()
    t0 = time()
    dec.compile(optimizer=Adam(), loss='kld')
    y_pred = dec.fit(data,
                     maxiter=200,
                     batch_size=batch_size,
                     update_interval=update_interval)
    pd.DataFrame(y_pred).to_csv("processed_data/dec_cluster_result.csv",
                                index=False,
                                mode='w',
                                sep=',',
                                header=False)
    print('clustering time', (time() - t0))
Example #8
 def res_block(self, inp, filters, kernel_size=3, padding="same", **kwargs):
     """ Residual block """
     logger.debug("inp: %s, filters: %s, kernel_size: %s, kwargs: %s)", inp,
                  filters, kernel_size, kwargs)
     var_x = LeakyReLU(alpha=0.2)(inp)
     if self.use_reflect_padding:
         var_x = ReflectionPadding2D(stride=1,
                                     kernel_size=kernel_size)(var_x)
         padding = "valid"
     var_x = self.conv2d(var_x,
                         filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         **kwargs)
     var_x = LeakyReLU(alpha=0.2)(var_x)
     if self.use_reflect_padding:
         var_x = ReflectionPadding2D(stride=1,
                                     kernel_size=kernel_size)(var_x)
         padding = "valid"
     original_init = self.switch_kernel_initializer(
         kwargs,
         VarianceScaling(scale=0.2, mode="fan_in", distribution="uniform"))
     var_x = self.conv2d(var_x,
                         filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         force_initializer=True,
                         **kwargs)
     self.switch_kernel_initializer(kwargs, original_init)
     var_x = Add()([var_x, inp])
     var_x = LeakyReLU(alpha=0.2)(var_x)
     return var_x
def model_build(in_, k):

    model = SeparableConv2D(8,
                            kernel_size=(1, 1),
                            strides=(1, 1),
                            padding="same",
                            kernel_initializer=VarianceScaling(
                                scale=1,
                                mode="fan_in",
                                distribution="normal",
                                seed=None),
                            bias_initializer="zeros")(in_)
    model = BatchNormalization()(model)
    model = ReLU()(model)

    horizontal_branch = branch_(model, (1, k))
    vertical_branch = branch_(model, (k, 1))

    model = Concatenate(axis=-1)([horizontal_branch, vertical_branch])

    model = Dense(64, activation="relu")(model)
    model = Dropout(0.5)(model)

    model = Dense(128, activation="relu")(model)
    model = Dropout(0.5)(model)

    model = Dense(256, activation="relu")(model)
    model = Dropout(0.5)(model)

    model = Dense(3, activation="softmax")(model)

    return model
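
A usage sketch for model_build, assuming the branch_ helper used above and the usual Keras imports are in scope; the input shape and k are illustrative. If branch_ returns 4-D feature maps, the Dense layers act on the channel axis and the output keeps its spatial dimensions.
inputs = Input(shape=(64, 64, 1))   # illustrative grayscale input
outputs = model_build(inputs, k=5)  # k sets the 1xk / kx1 branch kernel sizes
model = Model(inputs=inputs, outputs=outputs)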
	def build_discriminator(self, discriminator_lr):
		model = Sequential()
		fc1 = Dense(50, input_shape=(self.state_dimension + self.num_actions ,), activation='relu',
					kernel_initializer=VarianceScaling(mode='fan_avg',
													   distribution='normal'))
		fc2 = Dense(50, activation='relu',
					kernel_initializer=VarianceScaling(mode='fan_avg',
													   distribution='normal'))
		fc3 = Dense(1, activation='sigmoid',
					kernel_initializer=VarianceScaling(mode='fan_avg',
													   distribution='normal'))
		model.add(fc1)
		model.add(fc2)
		model.add(fc3)
		model.compile(optimizer=Adam(lr=self.discriminator_lr) , loss='binary_crossentropy', metrics=['accuracy'])
		self.discriminator = model
Example #11
 def upsample(x, number):
     x = Conv2D(256, kernel_size=3, strides=1, padding='same',
                kernel_initializer=VarianceScaling(scale=varscale, mode='fan_in',
                                                   distribution='normal', seed=None),
                name='upSample_Conv2d_' + str(number))(x)
     x = self.SubpixelConv2D('upSample_SubPixel_'+str(number), 2)(x)
     x = PReLU(shared_axes=[1,2], name='upSample_PReLU_'+str(number))(x)
     return x
Example #12
File: ConvDEC.py Project: justKidrauhl/DDC
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
    model = Sequential()
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'
    init = VarianceScaling(scale=1. / 3., mode='fan_in', distribution='uniform')
    model.add(InputLayer(input_shape))
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))

    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))

    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))

    model.add(Flatten())
    model.add(Dense(units=filters[3], name='embedding'))
    model.add(Dense(units=filters[2]*int(input_shape[0]/8)*int(input_shape[0]/8), activation='relu'))

    model.add(Reshape((int(input_shape[0]/8), int(input_shape[0]/8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))

    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))

    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))
    encoder = Model(inputs=model.input, outputs=model.get_layer('embedding').output)
    return model, encoder
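
A usage sketch for CAE, assuming the Keras imports used above; the MNIST-sized input and the commented training calls are illustrative only.
cae, encoder = CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10])
cae.compile(optimizer='adam', loss='mse')
# cae.fit(x, x, batch_size=256, epochs=200)   # train to reconstruct the input
# embeddings = encoder.predict(x)             # filters[3]-dimensional codes for clustering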
Example #13
	def __init__(self, state_size, pretrained=False, model_name=None):
		'''agent config'''
		self.state_size = state_size    	# normalized previous days
		self.action_size = 3           		# [sit, buy, sell]
		self.model_name = model_name
		self.inventory = []
		self.memory = deque(maxlen=1000)
		self.first_iter = True
		
		'''model config'''
		self.gamma = 0.95
		self.epsilon = 1.0
		self.epsilon_min = 0.01
		self.epsilon_decay = 0.995
		self.learning_rate = 0.001
		self.loss = huber_loss
		self.custom_objects = {'huber_loss': huber_loss}	# important for loading the model from memory
		self.optimizer = RMSprop(lr=self.learning_rate)
		self.initializer = VarianceScaling()

		'''load pretrained model'''
		if pretrained and self.model_name is not None:
			self.model = self.load()
		else:
			self.model = self._model()
 def build(self, input_shape):
     # input_shape: [(None, ?, 128), (None, ?, 128)]
     init = VarianceScaling(scale=1.0,
                            mode='fan_avg',
                            distribution='uniform')
     self.W0 = self.add_weight(name='W0',
                               shape=(input_shape[0][-1], 1),
                               initializer=init,
                               regularizer=l2(cf.L2_LAMBDA),
                               trainable=True)
     self.W1 = self.add_weight(name='W1',
                               shape=(input_shape[1][-1], 1),
                               initializer=init,
                               regularizer=l2(cf.L2_LAMBDA),
                               trainable=True)
     self.W2 = self.add_weight(name='W2',
                               shape=(1, 1, input_shape[0][-1]),
                               initializer=init,
                               regularizer=l2(cf.L2_LAMBDA),
                               trainable=True)
     self.bias = self.add_weight(name='linear_bias',
                                 shape=([1]),
                                 initializer='zero',
                                 regularizer=l2(cf.L2_LAMBDA),
                                 trainable=True)
     super(ContextQueryAttention, self).build(input_shape)
Example #15
 def create_actor_network(self, state_size,action_dim):
     print("Now we build the model")
     S = Input(shape=[state_size])
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
     #Steering = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     #Steering = Dense(1,activation='tanh',init=lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)
     Steering = Dense(1,activation='tanh',kernel_initializer=VarianceScaling(scale=1e-2))(h1)
     #Acceleration = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Acceleration = Dense(1,activation='sigmoid',kernel_initializer=VarianceScaling(scale=1e-2))(h1)
     #Brake = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
     Brake = Dense(1,activation='sigmoid',kernel_initializer=VarianceScaling(scale=1e-2))(h1)
     # V = merge([Steering,Acceleration,Brake],mode='concat')
     V = concatenate([Steering,Acceleration,Brake],axis=-1)
     model = Model(inputs=S, outputs=V)
     return model, model.trainable_weights, S
Example #16
 def res_block(self, inp, filters, kernel_size=3, padding= 'same', **kwargs):
     """ Residual block """
     logger.debug("inp: %s, filters: %s, kernel_size: %s, kwargs: %s)",
                  inp, filters, kernel_size, kwargs)
     kwargs = self.update_kwargs(kwargs)
     var_x = LeakyReLU(alpha=0.2)(inp)
     if self.use_reflect_padding:
         var_x = ReflectionPadding2D(stride=1, kernel_size=kernel_size)(var_x)
         padding = 'valid'
     var_x = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding=padding,
                    **kwargs)(var_x)
     var_x = LeakyReLU(alpha=0.2)(var_x)
     if self.use_reflect_padding:
         var_x = ReflectionPadding2D(stride=1, kernel_size=kernel_size)(var_x)
         padding = 'valid'
     temp = kwargs["kernel_initializer"]
     kwargs["kernel_initializer"] = VarianceScaling(scale=0.2,
                                                    mode='fan_in',
                                                    distribution='uniform')
     var_x = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding=padding,
                    **kwargs)(var_x)
     kwargs["kernel_initializer"] = temp
     var_x = Add()([var_x, inp])
     var_x = LeakyReLU(alpha=0.2)(var_x)
     return var_x
Example #17
def build_model(inputs, kernel_size, n_classes):
    '''
    This function is used to stack the residual units on top of one another to build the
    network

    We assume kernel size to be the same for all layers.

    inputs: represents the batch of input images
    kernel_size: represents the kernel dimensions which remain unchanged throughout the network
    n_classes: number of classes in the classification task
    '''
    x = Conv2D(4,
               kernel_size=kernel_size,
               strides=(1, 1),
               padding="same",
               kernel_initializer=VarianceScaling(scale=1,
                                                  mode="fan_in",
                                                  distribution="normal",
                                                  seed=None),
               bias_initializer="zeros")(inputs)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D()(x)

    x = residual_unit(x, 8, 8, kernel_size)
    x = MaxPooling2D()(x)

    x = residual_unit(x, 8, 8, kernel_size)
    x_shape = K.int_shape(x)
    x = AveragePooling2D(pool_size=(x_shape[1], x_shape[2]))(x)

    x = Flatten()(x)
    x = Dense(n_classes, activation="softmax")(x)

    return x
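
A usage sketch, assuming residual_unit and the Keras imports used above are available; the input shape and class count are illustrative.
inputs = Input(shape=(32, 32, 3))
outputs = build_model(inputs, kernel_size=(3, 3), n_classes=10)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])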
	def build_actor_model(self, actor_lr):
		model = Sequential()
		fc1 = Dense(50, input_shape=(self.state_dimension,), 
			activation='relu',
			kernel_initializer=VarianceScaling(mode='fan_avg',
			distribution='normal'), kernel_regularizer=regularizers.l2(0.01))
		fc2 = Dense(50, activation='relu',
			kernel_initializer=VarianceScaling(mode='fan_avg',
			distribution='normal'), kernel_regularizer=regularizers.l2(0.01))
		fc3 = Dense(self.num_actions, activation='softmax',
			kernel_initializer=VarianceScaling(mode='fan_avg',
			distribution='normal'), kernel_regularizer=regularizers.l2(0.01))
		model.add(fc1)
		model.add(fc2)
		model.add(fc3)
		model.compile(loss='mse', optimizer=Adam(lr=self.actor_lr))
		self.actor_model = model
Example #19
    def build_network(self):
        # CNN
        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=8,
                   strides=(4, 4),
                   activation='relu',
                   data_format='channels_first',
                   input_shape=self.input_dimension,
                   kernel_initializer=VarianceScaling(scale=2.0)))
        model.add(
            Conv2D(64,
                   kernel_size=4,
                   strides=(2, 2),
                   activation='relu',
                   data_format='channels_first',
                   input_shape=self.input_dimension,
                   kernel_initializer=VarianceScaling(scale=2.0)))
        model.add(
            Conv2D(
                64,
                kernel_size=3,
                strides=(1, 1),
                activation='relu',
                data_format='channels_first',
                input_shape=self.input_dimension,
            ))

        model.add(Flatten())
        model.add(
            Dense(512,
                  activation='relu',
                  kernel_initializer=VarianceScaling(scale=2.0)))
        model.add(
            Dense(units=self.total_action,
                  kernel_initializer=VarianceScaling(scale=2.0)))

        if self.load_path is not None:
            model.load_weights(self.load_path)

        model.compile(RMSprop(self.learning_rate),
                      loss='mean_squared_error',
                      metrics=['acc'])
        model.summary()
        return model
def build_actions_model(history_size=4):
    model = Sequential()
    model.add(
        Dense(units=history_size * 6,
              input_shape=(history_size * 6, ),
              activation='relu',
              kernel_initializer=VarianceScaling()))
    return model
Example #21
 def create_actor_network(self, state_size, action_dim):
     S = Input(shape=[state_size])
     h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
     h1 = Dense(HIDDEN2_UNITS, activation='linear')(h0)
     # pass the initializer instance directly; a shape-only lambda breaks under
     # Keras versions that call initializers with an extra dtype argument
     Pitch = Dense(1,
                   activation='tanh',
                   kernel_initializer=VarianceScaling(scale=1e-4))(h1)
     Yaw = Dense(1,
                 activation='tanh',
                 kernel_initializer=VarianceScaling(scale=1e-4))(h1)
     V = Concatenate()([Pitch, Yaw])
     model = Model(inputs=S, outputs=V)
     return model, model.trainable_weights, S
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()

        self.conv1 = layers.Conv2D(filter_num, (3,3), strides=stride,
                                   kernel_initializer=VarianceScaling(),
                                   kernel_regularizer=keras.regularizers.l2(
                                       0.0005),
                                   padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1,
                                   kernel_initializer=VarianceScaling(),
                                   kernel_regularizer=keras.regularizers.l2(
                                       0.0005),
                                   padding='same')
        self.bn2 = layers.BatchNormalization()
Example #23
 def __init__(self, arg):
     self.arg = arg
     self.nb_actions = arg.nb_actions
     self.state_size = arg.state_size
     self.rint = VarianceScaling(scale=1e-4,
                                 mode='fan_in',
                                 distribution='normal',
                                 seed=None)
Example #24
File: blocks.py Project: veugene/fcn_maker
def vnet_block(filters, num_conv=3, subsample=False, upsample=False,
               upsample_mode='conv', skip=True, dropout=0., normalization=None,
               norm_kwargs=None,
               init=VarianceScaling(scale=3., mode='fan_avg'),
               weight_decay=None, nonlinearity='relu', ndim=3, name=None):
    name = _get_unique_name('vnet_block', name)
    if norm_kwargs is None:
        norm_kwargs = {}
    def f(input):
        output = input
        if subsample:
            output = Convolution(filters=filters,
                                 kernel_size=2,
                                 strides=2,
                                 ndim=ndim,
                                 kernel_initializer=init,
                                 padding='same',
                                 kernel_regularizer=_l2(weight_decay),
                                 name=name+"_downconv")(output)
        for i in range(num_conv):
            output = norm_nlin_conv(filters,
                                    kernel_size=5,
                                    normalization=normalization,
                                    weight_decay=weight_decay,
                                    norm_kwargs=norm_kwargs,
                                    init=init,
                                    nonlinearity=nonlinearity,
                                    ndim=ndim,
                                    name=name)(output)
        
            if dropout > 0:
                output = get_dropout(dropout, nonlinearity)(output)
        if skip:
            output = _shortcut(input, output,
                               subsample=subsample,
                               upsample=False,
                               upsample_mode=upsample_mode,
                               weight_decay=weight_decay,
                               init=init,
                               ndim=ndim,
                               name=name)
        if upsample:
            # "up-convolution" also halves the number of feature maps.
            if normalization is not None:
                output = normalization(name=name+"_norm", **norm_kwargs)(output)
            output = get_nonlinearity(nonlinearity)(output)
            output = _upsample(output,
                               mode=upsample_mode,
                               ndim=ndim,
                               filters=filters//2,
                               kernel_size=2,
                               kernel_initializer=init,
                               kernel_regularizer=_l2(weight_decay),
                               name=name+"_upconv")
            output = get_nonlinearity(nonlinearity)(output)
        return output

    return f
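
A minimal usage sketch, assuming fcn_maker's helpers (Convolution, norm_nlin_conv, _shortcut, _upsample, ...) are importable; the call below only builds the block function, which is then applied to a tensor.
down = vnet_block(filters=32, num_conv=2, subsample=True, ndim=3, name='down_1')
# x = down(x)   # apply to a 5-D volume tensor when ndim=3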
Example #25
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')
        #  net = layers.BatchNormalization()(states)

        w_init = VarianceScaling(scale=1. / 3,
                                 mode='fan_in',
                                 distribution='uniform')
        # Add hidden layers
        net = layers.Dense(units=400,
                           kernel_initializer=w_init,
                           bias_initializer=w_init,
                           activation='relu')(states)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        net = layers.Dense(units=300,
                           kernel_initializer=w_init,
                           bias_initializer=w_init,
                           activation='relu')(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.5)(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with tanh activation and scale it to the action range

        w_init = RandomUniform(-3e-3, 3e-3)
        raw_actions = layers.Dense(units=self.action_size,
                                   kernel_initializer=w_init,
                                   bias_initializer=w_init,
                                   activation='tanh',
                                   name='raw_actions')(net)

        # Scale [-1, 1] output for each action dimension to proper range
        actions = layers.Lambda(
            lambda x: (x * self.action_range / 2.0) + self.action_low,
            name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        # loss = K.mean(-action_gradients * actions)
        loss = K.mean(action_gradients * actions)
        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=self.lr, decay=self.decay)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
def residual_block(model, f_in, f_out, k):

    shortcut = model

    model = SeparableConv2D(f_in,
                            kernel_size=k,
                            strides=(1, 1),
                            padding="same",
                            kernel_initializer=VarianceScaling(
                                scale=1,
                                mode="fan_in",
                                distribution="normal",
                                seed=None),
                            bias_initializer="zeros")(model)
    model = BatchNormalization()(model)
    model = ELU()(model)

    model = SeparableConv2D(f_out,
                            kernel_size=k,
                            strides=(1, 1),
                            padding="same",
                            kernel_initializer=VarianceScaling(
                                scale=1,
                                mode="fan_in",
                                distribution="normal",
                                seed=None),
                            bias_initializer="zeros")(model)
    model = BatchNormalization()(model)

    if f_in != f_out:
        shortcut = SeparableConv2D(f_out,
                                   kernel_size=k,
                                   strides=(1, 1),
                                   padding="same",
                                   kernel_initializer=VarianceScaling(
                                       scale=1,
                                       mode="fan_in",
                                       distribution="normal",
                                       seed=None),
                                   bias_initializer="zeros")(shortcut)
        shortcut = BatchNormalization()(shortcut)

    model = Add()([model, shortcut])
    model = ReLU()(model)
    return model
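
A usage sketch for residual_block, assuming the Keras imports used above; shapes are illustrative. When f_in != f_out the shortcut is projected with an extra SeparableConv2D, otherwise the input is added back directly, so in that case the incoming tensor must already have f_out channels.
inputs = Input(shape=(64, 64, 3))
x = residual_block(inputs, f_in=16, f_out=32, k=3)   # f_in != f_out: projected shortcut
x = residual_block(x, f_in=32, f_out=32, k=3)        # channels match: identity shortcut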
Example #27
def autoencoder(dims, act='relu', plot_out=False):  ##TODO I have not gotten plot_out to work on Windows yet
    """
    Fully connected auto-encoder model, symmetric.
    Arguments:
        dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
            The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
        act: activation, not applied to Input, Hidden and Output layers
    return:
        (ae_model, encoder_model), Model of autoencoder and model of encoder
    """

    n_stacks = len(dims) - 1
    init = VarianceScaling(scale=1. / 3.,
                           mode='fan_in',
                           distribution='uniform')

    # input
    x = Input(shape=(dims[0], ), name='input')
    h = x

    # internal layers in encoder
    for i in range(n_stacks - 1):
        h = Dense(dims[i + 1],
                  activation='sigmoid',
                  kernel_initializer=init,
                  name='encoder_%d' % i)(h)

    # hidden layer
    h = Dense(dims[-1],
              kernel_initializer=init,
              name='encoder_%d' % (n_stacks - 1),
              activity_regularizer=regularizers.l1(1e-5),
              activation='sigmoid')(h)
    ## At the hidden layer I added activity_regularizer=regularizers.l1(1e-5) and activation='sigmoid'
    ## Official documentation for the Dense layer: http://keras-cn.readthedocs.io/en/latest/layers/core_layer/
    ## The article I was reading at the time: https://blog.csdn.net/u012969412/article/details/70882296
    y = h
    # internal layers in decoder
    for i in range(n_stacks - 1, 0, -1):
        y = Dense(dims[i],
                  activation=act,
                  kernel_initializer=init,
                  name='decoder_%d' % i)(y)

    # output
    y = Dense(dims[0], kernel_initializer=init, name='decoder_0')(y)
    AE = Model(inputs=x, outputs=y, name='AE')  ##TODO this part is still unclear to me
    Encoder = Model(inputs=x, outputs=h, name='encoder')
    ## I replaced the earlier return Model(inputs=x, outputs=y, name='AE'), Model(inputs=x, outputs=h, name='encoder') with these named variables, which reads more clearly
    if plot_out:  ## plot the models
        from keras.utils import plot_model
        plot_model(AE, 'AE.png', show_shapes=True, show_layer_names=True)
        plot_model(Encoder,
                   'Encoder.png',
                   show_shapes=True,
                   show_layer_names=True)
    return AE, Encoder
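
A usage sketch; dims follows the docstring (dims[0] is the input dimension, dims[-1] the embedding size), and the commented calls are illustrative only.
ae, encoder = autoencoder(dims=[784, 500, 500, 2000, 10])
ae.compile(optimizer='adam', loss='mse')
# ae.fit(x_train, x_train, batch_size=256, epochs=100)   # learn to reconstruct the input
# codes = encoder.predict(x_train)                       # 10-dimensional embeddings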
 def conv_layer(_inputs, _filters, _kernel_size, _strides, _name):
     return Conv2D(filters=_filters,
                   kernel_size=_kernel_size,
                   strides=_strides,
                   kernel_initializer=VarianceScaling(scale=2.0),
                   padding="valid",
                   activation=relu,
                   use_bias=False,
                   name=_name)(_inputs)
Example #29
def he_normal_scaled(scale):
    """He normal variance scaling initializer.

    # References
        He et al., http://arxiv.org/abs/1502.01852
    """
    return VarianceScaling(scale=2. * scale,
                           mode='fan_in',
                           distribution='normal')
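
A usage sketch; with scale=1.0 this reduces to the standard He normal initializer.
x = Conv2D(64, kernel_size=3, padding='same',
           kernel_initializer=he_normal_scaled(1.0))(x)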
	def build_expert_model(self):
		model = Sequential()
		fc1 = Dense(self.hidden_size, input_shape=(self.state_dimension,), activation='relu',
					kernel_initializer=VarianceScaling(mode='fan_avg', distribution='normal'),
					kernel_regularizer=regularizers.l2(self.reg_cost))
		fc2 = Dense(self.hidden_size, activation='relu',
					kernel_initializer=VarianceScaling(mode='fan_avg', distribution='normal'),
					kernel_regularizer=regularizers.l2(self.reg_cost))

		fc3 = Dense(self.num_actions, activation='linear',
					kernel_initializer=VarianceScaling(mode='fan_avg', distribution='normal'),
					kernel_regularizer=regularizers.l2(self.reg_cost))
		model.add(fc1)
		model.add(fc2)
		model.add(fc3)
		#self.expert.model_from_json(self.expert_path)
		model.load_weights(self.expert_weights)
		return model