Example #1
    def __call__(self, shape, dtype=None):
        # print("PWMKernelInitializer shape: ", shape)

        pwm = pwm_list2pwm_array(self.pwm_list, shape, dtype,
                                 self.background_probs)

        if self.add_noise_before_Pwm2Pssm:
            # add noise with numpy truncnorm function
            pwm = _truncated_normal(mean=pwm,
                                    stddev=self.stddev,
                                    seed=self.seed)

            pssm = pwm_array2pssm_array(pwm,
                                        background_probs=self.background_probs)

            # Force stddev to 0 here because noise was already added above;
            # tf.Variable(pssm) might work instead.
            # return K.variable(pssm)  # this raises an error
            return K.truncated_normal(shape,
                                      mean=pssm,
                                      stddev=0,
                                      dtype=dtype,
                                      seed=self.seed)
        else:
            pssm = pwm_array2pssm_array(pwm,
                                        background_probs=self.background_probs)
            return K.truncated_normal(shape,
                                      mean=pssm,
                                      stddev=self.stddev,
                                      dtype=dtype,
                                      seed=self.seed)
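
An initializer like this is typically passed as a layer's kernel_initializer. A minimal usage sketch, assuming the constructor takes the attributes referenced above (pwm_list, stddev, seed); the kernel_size and filter count are illustrative:

from keras.layers import Conv1D

# my_pwm_list is a placeholder for a list of position weight matrices
init = PWMKernelInitializer(pwm_list=my_pwm_list, stddev=0.05, seed=1)  # assumed signature
conv = Conv1D(filters=len(my_pwm_list), kernel_size=24, kernel_initializer=init)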
Example #2
def neural_network_model(data):

    # Smaller initial values make training smoother
    hidden_1_layer = {
        'weights':
        K.variable(K.truncated_normal([784, n_nodes_hl1], stddev=0.1)),
        'biases': K.variable(K.constant(0.1, shape=[n_nodes_hl1]))
    }

    hidden_2_layer = {
        'weights':
        K.variable(K.truncated_normal([n_nodes_hl1, n_nodes_hl2], stddev=0.1)),
        'biases':
        K.variable(K.constant(0.1, shape=[n_nodes_hl2]))
    }

    hidden_3_layer = {
        'weights':
        K.variable(K.truncated_normal([n_nodes_hl2, n_nodes_hl3], stddev=0.1)),
        'biases':
        K.variable(K.constant(0.1, shape=[n_nodes_hl3]))
    }

    output_layer = {
        'weights':
        K.variable(K.truncated_normal([n_nodes_hl3, n_classes], stddev=0.1)),
        'biases':
        K.variable(K.constant(0.1, shape=[n_classes]))
    }

    # K.dot performs matrix multiplication (matmul)

    l1 = tf.add(K.dot(data, hidden_1_layer['weights']),
                hidden_1_layer['biases'])
    # Pass through the activation function.
    # Official definition: computes rectified linear and returns a Tensor.
    l1 = tf.nn.relu(l1)

    # l1, the previous layer's output, is this layer's input data
    l2 = tf.add(K.dot(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(K.dot(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = K.dot(l3, output_layer['weights']) + output_layer['biases']

    return output
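
A usage sketch, assuming TF1-style graph mode (which the tf.add calls imply) and illustrative values for the globals the function references:

import tensorflow as tf

n_nodes_hl1 = n_nodes_hl2 = n_nodes_hl3 = 500  # illustrative layer sizes
n_classes = 10

x = tf.placeholder('float', [None, 784])  # e.g. flattened MNIST images
logits = neural_network_model(x)          # shape [None, n_classes]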
Example #3
def spectral_norm(w, iteration=1):
    #From "Spectral Normalization for GANs" paper
    #https://arxiv.org/pdf/1802.05957.pdf

    w_shape = w.shape.as_list()
    w = K.reshape(w, [-1, w_shape[-1]])

    u = K.truncated_normal([1, w_shape[-1]])

    u_hat = u
    v_hat = None
    # Power iteration; usually iteration = 1 is enough.
    for i in range(iteration):
        v_ = K.dot(u_hat, K.transpose(w))
        v_hat = l2_norm(v_)

        u_ = K.dot(v_hat, w)
        u_hat = l2_norm(u_)

    sigma = K.dot(K.dot(v_hat, w), K.transpose(u_hat))
    w_norm = w / sigma

    w_norm = K.reshape(w_norm, w_shape)

    return w_norm
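
spectral_norm depends on an l2_norm helper that is not shown here; a minimal sketch consistent with how it is called (the epsilon guard is an assumption):

def l2_norm(v, eps=1e-12):
    # Scale v to unit L2 norm; eps guards against division by zero.
    return v / (K.sqrt(K.sum(K.square(v))) + eps)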
Example #4
    def __call__(self, shape, dtype=None):
        from keras.initializers import _compute_fans

        fan_in, fan_out = _compute_fans(shape)
        scale = self.scale
        if self.mode == 'fan_in':
            scale /= max(1., fan_in)
        elif self.mode == 'fan_out':
            scale /= max(1., fan_out)
        else:
            scale /= max(1., float(fan_in + fan_out) / 2)
        if self.distribution == 'normal':
            # 0.879... = scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
            stddev = np.sqrt(scale) / .87962566103423978
            init = K.truncated_normal(shape,
                                      0.,
                                      stddev,
                                      dtype=dtype,
                                      seed=self.seed)
        else:
            limit = np.sqrt(3. * scale)
            init = K.random_uniform(shape,
                                    -limit,
                                    limit,
                                    dtype=dtype,
                                    seed=self.seed)

        return init * self.tree_weight
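
Dividing by 0.87962... undoes the variance shrinkage caused by truncating at two standard deviations, so the drawn values end up with a standard deviation of about sqrt(scale). The constant can be verified directly (assumes SciPy is installed):

from scipy.stats import truncnorm

print(truncnorm.std(a=-2, b=2, loc=0., scale=1.))  # 0.8796256610342398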
Example #5
 def __call__(self, shape, dtype=None):
     # print("PWMKernelInitializer shape: ", shape)
     return K.truncated_normal(shape,
                               mean=pwm_list2pwm_array(
                                   self.pwm_list, shape, dtype),
                               stddev=self.stddev,
                               dtype=dtype,
                               seed=self.seed)
Example #6
 def rand_weight_like(weight):
     assert K.image_data_format() == "channels_last", \
         "only channels_last is supported, got {}".format(K.image_data_format())
     kw, kh, num_channel, filters = weight.shape
     kvar = K.truncated_normal((kw, kh, num_channel, filters), 0, 0.05)
     w = K.eval(kvar)
     b = np.zeros((filters, ))
     return w, b
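
A usage sketch for re-randomizing an existing channels-last Conv2D layer; applying the result via set_weights is an assumption, not part of the source:

kernel = layer.get_weights()[0]  # existing 4-D convolution kernel
w, b = rand_weight_like(kernel)
layer.set_weights([w, b])        # replace kernel and bias in place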
Example #7
 def __call__(self, shape, dtype=None):
     x = K.truncated_normal(shape,
                            self.mean,
                            self.stddev,
                            dtype=dtype,
                            seed=self.seed)
     if self.seed is not None:
         self.seed += 1
     return x
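
Bumping self.seed after each call keeps initialization reproducible across runs while guaranteeing that two layers sharing one initializer object do not receive identical weights. A usage sketch (the class name SeededInit is hypothetical):

init = SeededInit(mean=0., stddev=0.05, seed=42)  # hypothetical wrapper class
w1 = init((3, 3))  # drawn with seed 42
w2 = init((3, 3))  # drawn with seed 43, so w1 and w2 differ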
Example #8
    def call(self, x):
        x = Permute([2, 1])(x)
        k = K.variable(K.truncated_normal(shape=(20, 50, 300)))
        # Returns a tensor of truncated-normal values; samples farther than two
        # standard deviations from the mean are discarded and redrawn.
        x = K.conv1d(x, k, padding='same')
        x = K.max(x, axis=1)

        # x = keras.backend.permute_dimensions(x, [0, 3, 2, 1])
        return x
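
For reference, K.conv1d expects a kernel of shape (kernel_size, input_dim, filters), so the (20, 50, 300) kernel above maps 50 input channels to 300 filters. A quick shape check with illustrative dimensions:

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 10, 50), dtype='float32'))  # (batch, steps, channels)
k = K.truncated_normal(shape=(20, 50, 300))             # (kernel_size, in, filters)
y = K.conv1d(x, k, padding='same')
print(K.int_shape(y))  # (2, 10, 300)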
Example #9
	def call(self, x):
		axis = (1, 2)  # height, width -> y, x
		conv0 = K.conv2d(x, self.kernel, padding="same")
		conv1 = K.conv2d(conv0, self.kernel, padding="same")
		dilatedConv0 = dilateTensor(conv0, axis, 0, 0)
		dilatedConv1 = dilateTensor(conv1, axis, 1, 1)
		conv1 = Add()([dilatedConv0, dilatedConv1])
		
		
		shape = list(self.kernel_size) + [self.out_dim, self.out_dim]
		weights = K.truncated_normal(shape)
		conv2 = K.conv2d(conv1, weights, padding="same")
		
		output = Add()([conv1, conv2])
		output = K.relu(output)
		
		return output
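
Because K.truncated_normal runs inside call, conv2 is convolved with a freshly drawn random kernel on every forward pass. If a fixed, trainable kernel is intended, the usual pattern is to create it once in build; a sketch under that assumption, not the author's code:

	def build(self, input_shape):
		shape = list(self.kernel_size) + [self.out_dim, self.out_dim]
		# Create the second conv kernel once, as a trainable weight.
		self.conv2_kernel = self.add_weight(name='conv2_kernel', shape=shape,
			initializer='truncated_normal', trainable=True)
		super().build(input_shape)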
Example #10
def normal_init(shape):
    return K.truncated_normal(shape, stddev=0.1)
Example #11
def weight_variable(shape):
    return K.truncated_normal(shape, stddev=0.01)
Example #12
def initializer_he(shape, dtype=None):
    '''
    He et al. initialization from https://arxiv.org/pdf/1502.01852.pdf
    '''
    return K.truncated_normal(shape, dtype=dtype) * K.sqrt(K.constant(2. / float(shape[0])))
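
Keras accepts any callable with a (shape, dtype) signature as an initializer, so these functions can be passed straight to a layer:

from keras.layers import Dense

hidden = Dense(128, kernel_initializer=initializer_he)  # layer size illustrative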
Example #13
 def weight_variable(shape):
     return backend.truncated_normal(shape, stddev=0.1)
Example #14
 def f(shape, dtype=None):
     return K.truncated_normal(shape, stddev=stddev, dtype=dtype)
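
Since f closes over a stddev variable, it is presumably defined inside a factory function; a sketch of the enclosing scope (the name make_initializer is an assumption):

def make_initializer(stddev):
    def f(shape, dtype=None):
        return K.truncated_normal(shape, stddev=stddev, dtype=dtype)
    return f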