def build(self, input_shape):
        w_shape = [input_shape[3], input_shape[3]]  # [n_channel, n_channel]

        # Sample a random orthogonal matrix:
        w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype('float32')
        self.rotate_matrix = self.add_weight(
            "rotate_matrix",
            w_shape,
            initializer=initializers.constant(w_init),
            trainable=True)
        # Debug: track the current determinant of the rotation matrix.
        self.determinant = K.variable(value=np.linalg.det(w_init),
                                      name='determinant')

        # add log-det as loss
        log_det_factor = int(input_shape[1] * input_shape[2])
        # log_det = tf.log(tf.abs(tf.matrix_determinant(self.rotate_matrix)))
        # log_det = tf.log(tf.abs(tf.matrix_determinant(self.rotate_matrix)) + K.epsilon())
        # TODO: is it bad to use clip_by_value?
        # log_det = tf.log(tf.clip_by_value(tf.abs(tf.matrix_determinant(self.rotate_matrix)), 0.001, 1000))
        log_det = tf.log(tf.abs(tf.matrix_determinant(self.rotate_matrix)))
        self.add_loss(-1 * log_det_factor * log_det *
                      self.bit_per_sub_pixel_factor)

        # self.add_update([K.update(self.determinant, tf.matrix_determinant(self.rotate_matrix))])
        # final
        super().build(input_shape)
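Why the QR initialization above works: the Q factor of a Gaussian matrix is orthogonal, so its determinant is ±1 and the log-det penalty added in build() starts at zero. A minimal NumPy check (standalone sketch, nothing here comes from the layer itself):

import numpy as np

q = np.linalg.qr(np.random.randn(8, 8))[0]  # orthogonal Q factor
assert np.isclose(abs(np.linalg.det(q)), 1.0)
print(np.log(abs(np.linalg.det(q))))  # ~0.0: the initial log-det loss term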
Example #2
def sideBranch(input, factor):
    Con1 = layers.Conv2D(1, (1, 1),
                         activation=None,
                         padding='SAME',
                         kernel_regularizer=l2(0.00001))(input)
    kernelSize = (2 * factor, 2 * factor)
    initWeight = upsampling_bilinear(factor, 1, 1)  # bilinear kernel as ndarray
    initKernel = initializers.constant(value=initWeight)

    DeCon = layers.Conv2DTranspose(1,
                                   kernelSize,
                                   strides=factor,
                                   padding='SAME',
                                   use_bias=False,
                                   activation=None,
                                   kernel_initializer=initKernel,
                                   kernel_regularizer=l2(0.00001))(Con1)
    return DeCon
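upsampling_bilinear() is a helper defined elsewhere in this repo. A plausible sketch of what it returns (hypothetical reimplementation, not the repo's code) is a (2*factor, 2*factor, in, out) bilinear interpolation kernel, so the Conv2DTranspose starts out as plain bilinear upsampling:

import numpy as np

def bilinear_kernel(factor, in_channels, out_channels):
    # Hypothetical stand-in for upsampling_bilinear().
    size = 2 * factor
    center = factor - 0.5  # size is even by construction
    og = np.ogrid[:size, :size]
    filt = ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))
    weight = np.zeros((size, size, in_channels, out_channels), dtype=np.float32)
    for i in range(min(in_channels, out_channels)):
        weight[:, :, i, i] = filt  # interpolate each channel independently
    return weight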
Example #3
    def __init__(self,
                 step_dim,
                 ll,
                 get_alpha=False,
                 get_sequence=False,
                 W_regularizer=None,
                 b_regularizer=None,
                 L_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 L_constraint=None,
                 bias=False,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.l_init = initializers.constant(value=0.5)
        self.ll = ll
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.L_regularizer = regularizers.get(L_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.L_constraint = constraints.get(L_constraint)
        self.get_sequence = get_sequence
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        self.get_alpha = get_alpha
        super(Test_IAN, self).__init__(**kwargs)
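The constructor above only stores initializers; they are applied later in build() via add_weight. A minimal sketch of that pattern with the constant(0.5) initializer (hypothetical toy layer, not Test_IAN's actual build):

from tensorflow.keras import initializers, layers

class ScaledIdentity(layers.Layer):
    def build(self, input_shape):
        # A scalar weight starting at 0.5, the way self.l_init would be used.
        self.l = self.add_weight(name='l', shape=(),
                                 initializer=initializers.constant(0.5))
        super().build(input_shape)

    def call(self, inputs):
        return self.l * inputs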
Example #4
    def __init__(self, n_outputs=2, input_shape=(16, ), init_value=2):
        """Constructor.

    Args:
      n_outputs: number of output neurons
      input_shape:
      init_value:
    """
        super(TestModel, self).__init__(name='bolton', dynamic=False)
        self.n_outputs = n_outputs
        self.layer_input_shape = input_shape
        self.output_layer = tf.keras.layers.Dense(
            self.n_outputs,
            input_shape=self.layer_input_shape,
            kernel_regularizer=L1L2(l2=1),
            kernel_initializer=constant(init_value),
        )
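A possible usage check (hedged: TestModel's call() is not shown in this excerpt, so the layer is invoked directly): every kernel entry should equal init_value after building.

import tensorflow as tf

model = TestModel(n_outputs=2, input_shape=(16,), init_value=2)
out = model.output_layer(tf.zeros((1, 16)))  # builds the Dense layer
assert (model.output_layer.kernel.numpy() == 2).all()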
Example #5
    def build(self, input_shape: TensorShape):
        if self.prior_memory_mean is None:
            self.build_prior_state()

        self._code_size = tf.constant(self.code_size, name="code_size")
        self._memory_size = tf.constant(self.memory_size, name="memory_size")
        self._iteration_count = tf.constant(self.iteration_count,
                                            name="iteration_count")
        if self.batch_size is not None:
            self._batch_size = tf.constant(self.batch_size, name="batch_size")

        # region Address weights
        with tf.name_scope("w_prior"):
            self._w_prior_stddev = tf.constant(self.w_prior_stddev,
                                               name="w_prior_stddev")

            self.w_prior_distribution = MultivariateNormalDiag(
                loc=tf.zeros(shape=[self._memory_size]),
                scale_identity_multiplier=self._w_prior_stddev,
                name="w_prior_distribution")

        log_w_stddev = self.add_weight(initializer=constant(self.initial_w_stddev),
                                       name="log_w_stddev",
                                       shape=[])
        self._w_stddev = tf.exp(log_w_stddev, name="w_stddev")
        # endregion

        # region Observational noise
        if self.observational_noise_stddev > 0.0:
            observational_noise_stddev = tf.constant(
                self.observational_noise_stddev,
                name="observational_noise_stddev")
        else:
            log_observational_noise_stddev = self.add_weight(
                initializer=zeros(),
                name="log_observational_noise_stddev",
                shape=[])
            observational_noise_stddev = tf.exp(
                log_observational_noise_stddev,
                name="observational_noise_stddev")
        self._observational_noise_stddev = observational_noise_stddev
        # endregion

        self.built = True
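Note the parameterization above: the weight holds log_w_stddev (so the constant initializer is interpreted in log-space) and tf.exp maps it to a strictly positive stddev. A minimal sketch of the same trick:

import tensorflow as tf

log_stddev = tf.Variable(0.0, name="log_stddev")  # unconstrained
stddev = tf.exp(log_stddev)                       # always > 0
print(float(stddev))  # 1.0 at initialization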
Example #6
    def build(self, input_shape):
        assert len(input_shape) == 5, "The input Tensor should have shape=[None, input_height, input_width," \
                                      " input_num_capsule, input_num_atoms]"
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.input_num_capsule = input_shape[3]
        self.input_num_atoms = input_shape[4]

        # Transform matrix
        if self.upsamp_type == 'subpix':
            self.W = self.add_weight(shape=[
                self.kernel_size, self.kernel_size, self.input_num_atoms,
                self.num_capsule * self.num_atoms * self.scaling * self.scaling
            ],
                                     initializer=self.kernel_initializer,
                                     name='W')
        elif self.upsamp_type == 'resize':
            self.W = self.add_weight(shape=[
                self.kernel_size, self.kernel_size, self.input_num_atoms,
                self.num_capsule * self.num_atoms
            ],
                                     initializer=self.kernel_initializer,
                                     name='W')
        elif self.upsamp_type == 'deconv':
            self.W = self.add_weight(shape=[
                self.kernel_size, self.kernel_size,
                self.num_capsule * self.num_atoms, self.input_num_atoms
            ],
                                     initializer=self.kernel_initializer,
                                     name='W')
        else:
            raise NotImplementedError(
                'Upsampling must be one of: "deconv", "resize", or "subpix"')

        self.b = self.add_weight(
            shape=[1, 1, self.num_capsule, self.num_atoms],
            initializer=initializers.constant(0.1),
            name='b')

        self.built = True
Example #7
    def build(self, input_shape):
        assert len(input_shape) == 5, "The input Tensor should have shape=[None, input_height, input_width," \
                                      " input_num_capsule, input_num_atoms]"
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.input_num_capsule = input_shape[3]
        self.input_num_atoms = input_shape[4]

        # Transform matrix
        self.W = self.add_weight(shape=[
            self.kernel_size, self.kernel_size, self.input_num_atoms,
            self.num_capsule * self.num_atoms
        ],
                                 initializer=self.kernel_initializer,
                                 name='W')

        self.b = self.add_weight(
            shape=[1, 1, self.num_capsule, self.num_atoms],
            initializer=initializers.constant(0.1),
            name='b')

        self.built = True
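Both build() variants above start the routing bias at 0.1 instead of 0. The same initializer can be exercised on its own (the shape here is illustrative, not the model's):

import tensorflow as tf
from tensorflow.keras import initializers

b = tf.Variable(initializers.constant(0.1)(shape=(1, 1, 4, 8)))
print(float(tf.reduce_min(b)), float(tf.reduce_max(b)))  # both ~0.1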
Example #8
    def structureModel(self):
        weightDecay = 0.00001
        Inputs = layers.Input(shape=self._inputShape,
                              batch_size=self._iBatchSize)
        Con1 = layers.Conv2D(64, (3, 3),
                             name='Con1',
                             activation='relu',
                             padding='SAME',
                             input_shape=self._inputShape,
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Inputs)
        Con2 = layers.Conv2D(64, (3, 3),
                             name='Con2',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Con1)
        Side1 = sideBranch(Con2, 1)
        MaxPooling1 = layers.MaxPooling2D((2, 2),
                                          name='MaxPooling1',
                                          strides=2,
                                          padding='SAME')(Con2)
        # outputs1
        Con3 = layers.Conv2D(128, (3, 3),
                             name='Con3',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(MaxPooling1)
        Con4 = layers.Conv2D(128, (3, 3),
                             name='Con4',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Con3)
        Side2 = sideBranch(Con4, 2)
        MaxPooling2 = layers.MaxPooling2D((2, 2),
                                          name='MaxPooling2',
                                          strides=2,
                                          padding='SAME')(Con4)
        # outputs2
        Con5 = layers.Conv2D(256, (3, 3),
                             name='Con5',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(MaxPooling2)
        Con6 = layers.Conv2D(256, (3, 3),
                             name='Con6',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Con5)
        Con7 = layers.Conv2D(256, (3, 3),
                             name='Con7',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Con6)
        Side3 = sideBranch(Con7, 4)
        MaxPooling3 = layers.MaxPooling2D((2, 2),
                                          name='MaxPooling3',
                                          strides=2,
                                          padding='SAME')(Con7)
        # outputs3
        Con8 = layers.Conv2D(512, (3, 3),
                             name='Con8',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(MaxPooling3)
        Con9 = layers.Conv2D(512, (3, 3),
                             name='Con9',
                             activation='relu',
                             padding='SAME',
                             strides=1,
                             kernel_regularizer=l2(weightDecay))(Con8)
        Con10 = layers.Conv2D(512, (3, 3),
                              name='Con10',
                              activation='relu',
                              padding='SAME',
                              strides=1,
                              kernel_regularizer=l2(weightDecay))(Con9)
        Side4 = sideBranch(Con10, 8)
        MaxPooling4 = layers.MaxPooling2D((2, 2),
                                          name='MaxPooling4',
                                          strides=2,
                                          padding='SAME')(Con10)
        # outputs4
        Con11 = layers.Conv2D(512, (3, 3),
                              name='Con11',
                              activation='relu',
                              padding='SAME',
                              strides=1,
                              kernel_regularizer=l2(weightDecay))(MaxPooling4)
        Con12 = layers.Conv2D(512, (3, 3),
                              name='Con12',
                              activation='relu',
                              padding='SAME',
                              strides=1,
                              kernel_regularizer=l2(weightDecay))(Con11)
        Con13 = layers.Conv2D(512, (3, 3),
                              name='Con13',
                              activation='relu',
                              padding='SAME',
                              strides=1,
                              kernel_regularizer=l2(weightDecay))(Con12)
        Side5 = sideBranch(Con13, 16)
        Fuse = layers.Concatenate(axis=-1)([Side1, Side2, Side3, Side4, Side5])

        # learn fusion weight
        fuseInitWeight = initializers.constant(0.2)
        Fuse = layers.Conv2D(1, (1, 1),
                             name='Fuse',
                             padding='SAME',
                             use_bias=False,
                             activation=None,
                             kernel_initializer=fuseInitWeight,
                             kernel_regularizer=l1(weightDecay))(Fuse)

        # output1 = layers.Activation('sigmoid', name='output1')(Side1)
        # output2 = layers.Activation('sigmoid', name='output2')(Side2)
        # output3 = layers.Activation('sigmoid', name='output3')(Side3)
        # output4 = layers.Activation('sigmoid', name='output4')(Side4)
        # output5 = layers.Activation('sigmoid', name='output5')(Side5)
        output6 = layers.Activation('sigmoid', name='output6')(Fuse)

        outputs = [output6]  # or [output1, output2, output3, output4, output5, output6]
        self._pModel = Model(inputs=Inputs, outputs=outputs)
        # NOTE: the successive assignments below were optimizer experiments;
        # only the last one (tf.optimizers.SGD) takes effect.
        pOptimizer = optimizers.adam(lr=0.0001)
        pOptimizer = optimizers.SGD(lr=0.000001, decay=0., momentum=0.9)
        pOptimizer = tf.optimizers.SGD(lr=0.5, decay=0., momentum=0.9)
        # pOptimizer = monitorSGD(lr=0.000001, decay=0., momentum=0.9)
        # grads = tf.gradients(classBalancedSigmoidCrossEntropy, self._pModel.trainable_weights)
        # pSGD = optimizers.SGD()

        self._pModel.compile(
            loss={
                # 'output1': classBalancedSigmoidCrossEntropy,
                # 'output2': classBalancedSigmoidCrossEntropy,
                # 'output3': classBalancedSigmoidCrossEntropy,
                # 'output4': classBalancedSigmoidCrossEntropy,
                # 'output5': classBalancedSigmoidCrossEntropy,
                'output6': classBalancedSigmoidCrossEntropy
            },
            optimizer=pOptimizer)
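Why constant(0.2) for the fusion kernel: with five concatenated side outputs, a 1x1 convolution whose weights are all 0.2 starts out as their plain average (5 * 0.2 = 1). A quick NumPy check:

import numpy as np

sides = np.random.rand(1, 8, 8, 5).astype('float32')  # five stacked side maps
fused = (sides * 0.2).sum(axis=-1)                    # 1x1 conv, weights 0.2
assert np.allclose(fused, sides.mean(axis=-1))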
Example #9
  def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # TODO(fchollet): better handling of input spec
    self.input_spec = InputSpec(shape=input_shape)

    if self.stateful:
      self.reset_states()
    else:
      # initial states: two all-zero tensors of shape (filters)
      self.states = [None, None]

    channel_axis = -1
    if self.data_format == 'channels_first':
      raise ValueError('Only channels_last is supported!')
    self.feat_shape = (input_shape[0], input_shape[2], input_shape[3], input_shape[4])
    input_dim = input_shape[channel_axis]
    depthwise_kernel_shape = self.kernel_size + (input_dim, 2)
    pointwise_kernel_shape = (1, 1, input_dim, self.filters*2)
    recurrent_depthwise_kernel_shape = self.kernel_size + (self.filters, 3)
    recurrent_pointwise_kernel_shape = (1, 1, self.filters, self.filters*3)
    self.depthwise_kernel_shape = self.kernel_size + (input_dim, 1)
    self.pointwise_kernel_shape = (1, 1, input_dim, self.filters)

    self.depthwise_kernel = self.add_weight(
        shape=depthwise_kernel_shape,
        initializer=self.kernel_initializer,
        name='depthwise_kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.pointwise_kernel = self.add_weight(
        shape=pointwise_kernel_shape,
        initializer=self.kernel_initializer,
        name='pointwise_kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.recurrent_depthwise_kernel = self.add_weight(
        shape=recurrent_depthwise_kernel_shape,
        initializer=self.recurrent_initializer,
        name='recurrent_depthwise_kernel',
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    self.recurrent_pointwise_kernel = self.add_weight(
        shape=recurrent_pointwise_kernel_shape,
        initializer=self.recurrent_initializer,
        name='recurrent_pointwise_kernel',
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    if self.use_bias:
      self.bias = self.add_weight(
          shape=(self.filters*2,),
          initializer=self.bias_initializer,
          name='bias',
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None

    gate_kernel_shape = (1, 1, input_dim, self.filters*2)
    recurrent_gate_kernel_shape = (1, 1, self.filters, self.filters*2)
    self.gate_kernel = self.add_weight(
        shape=gate_kernel_shape,
        initializer=initializers.constant(value=1.0/input_dim),
        name='gate_kernel',
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.recurrent_gate_kernel = self.add_weight(
        shape=recurrent_gate_kernel_shape,
        initializer=initializers.constant(value=1.0/self.filters),
        name='recurrent_gate_kernel',
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    self.gate_bias = self.add_weight(
        shape=(self.filters*2,),
        initializer=self.bias_initializer,
        name='gate_bias',
        regularizer=self.bias_regularizer,
        constraint=self.bias_constraint)

    self.kernel_f = self.gate_kernel[:, :, :, :self.filters]
    self.recurrent_kernel_f = self.recurrent_gate_kernel[:, :, :, :self.filters]
    self.kernel_o = self.gate_kernel[:, :, :, self.filters:self.filters * 2]
    self.recurrent_kernel_o = self.recurrent_gate_kernel[
        :, :, :, self.filters:self.filters * 2]

    self.depthwise_kernel_c = self.depthwise_kernel[:, :, :, :1]
    self.pointwise_kernel_c = self.pointwise_kernel[:, :, :, :self.filters]
    self.recurrent_depthwise_kernel_c = self.recurrent_depthwise_kernel[:, :, :, :1]
    self.recurrent_pointwise_kernel_c = self.recurrent_pointwise_kernel[:, :, :, :self.filters]

    self.depthwise_kernel_i = self.depthwise_kernel[:, :, :, 1:]
    self.pointwise_kernel_i = self.pointwise_kernel[:, :, :, self.filters:]
    self.recurrent_depthwise_kernel_i = self.recurrent_depthwise_kernel[:, :, :, 1:2]
    self.recurrent_pointwise_kernel_i = self.recurrent_pointwise_kernel[:, :, :, self.filters:self.filters*2]
    
    self.attention_weight_d = self.recurrent_depthwise_kernel[:, :, :, 2:]
    self.attention_weight_p = self.recurrent_pointwise_kernel[:, :, :, self.filters*2:]

    if self.use_bias:
      self.bias_f = self.gate_bias[:self.filters]
      self.bias_o = self.gate_bias[self.filters:self.filters * 2]
      self.bias_c = self.bias[:self.filters]
      self.bias_i = self.bias[self.filters:]
    else:
      self.bias_f = None
      self.bias_o = None
      self.bias_c = None
      self.bias_i = None
    self.built = True
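The gate kernels above are initialized to 1/input_dim (and 1/filters on the recurrent path), so each gate pre-activation starts as the channel-wise mean of its input. A minimal check:

import numpy as np

input_dim = 16
x = np.random.rand(1, 4, 4, input_dim)
w = np.full(input_dim, 1.0 / input_dim)  # one 1x1 gate filter, constant init
assert np.allclose(x @ w, x.mean(axis=-1))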
Example #10
    imgarr_res, statarr_res, numarr_res, test_size=0.20)

print("Building Model.")

# Model building begins here: see the layers below.
# First branch: a CNN for the images.

cnninput = tensorflow.keras.layers.Input(shape=input_shape)
conv = cnninput
conv = (Conv2D(32,
               kernel_size=(9, 9),
               strides=(1, 1),
               activation='relu',
               padding="same",
               kernel_initializer=GlorotUniform(),
               bias_initializer=constant(0.1),
               kernel_regularizer=l2(0.1)))(conv)

conv = (BatchNormalization())(conv)

conv = (MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same"))(conv)

conv = (Conv2D(64,
               kernel_size=(9, 9),
               strides=(1, 1),
               activation='relu',
               padding="same",
               kernel_initializer=GlorotUniform(),
               bias_initializer=constant(0.1),
               kernel_regularizer=l2(0.1)))(conv)
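bias_initializer=constant(0.1) is a common trick to keep ReLU units active at initialization. A minimal standalone check (the layer below is illustrative, not part of this model):

import tensorflow as tf
from tensorflow.keras.initializers import Constant

layer = tf.keras.layers.Conv2D(4, 3, padding='same', activation='relu',
                               kernel_initializer='zeros',
                               bias_initializer=Constant(0.1))
y = layer(tf.zeros((1, 8, 8, 3)))
assert abs(float(tf.reduce_min(y)) - 0.1) < 1e-6  # every activation starts at 0.1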