Example #1
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            num_classes=1,
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net_param.activation_function)

        self.net2 = ApplicationNetFactory.create(self.net2_param.name)(
            num_classes=1,
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net2_param.activation_function)
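For context: tensorflow.contrib was removed in TensorFlow 2, where the equivalent knobs live in tf.keras.regularizers. The sketch below is not part of the original example (make_regularizers is an illustrative name); it maps the same reg_type/decay selection onto that API. Note that contrib's l2_regularizer(decay) computes decay * sum(w**2) / 2, while tf.keras.regularizers.l2(decay) omits the division by two, so the decay is halved here to match.

import tensorflow as tf

def make_regularizers(reg_type, decay):
    # Sketch: map ('l1' | 'l2', decay) to TF 2.x keras regularizers.
    if decay <= 0:
        return None, None
    if reg_type == 'l2':
        # halve the scale to reproduce contrib's decay * sum(w**2) / 2
        reg = tf.keras.regularizers.l2(decay / 2.0)
    elif reg_type == 'l1':
        reg = tf.keras.regularizers.l1(decay)
    else:
        return None, None
    return reg, reg  # shared by weights and biases, as in the example

w_regularizer, b_regularizer = make_regularizers('l2', 1e-4)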
Example #2
    def initialise_network(self):
        print("Initializing network")
        # Set up the w and b regularizers
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        # Weight initializer; defaults to "he_normal" (see application_factory.py)
        w_ini = InitializerFactory.get_initializer(
            name=self.net_param.weight_initializer)

        print("weight_initializer: ", self.net_param.weight_initializer)
        print("name of application: ", self.net_param.name)

        # self.net_param.name is e.g. 'dense_vnet':
        # create the network and initialise it with regularizers and activation functions.
        self.net = ApplicationNetFactory.create(self.net_param.name)(
            num_classes=self.segmentation_param.num_classes,
            w_initializer=w_ini,
            b_initializer=InitializerFactory.get_initializer(
                name=self.net_param.bias_initializer),
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net_param.activation_function)
Example #3
    def test_sum_regularizer(self):
        l1_function = regularizers.l1_regularizer(.1)
        l2_function = regularizers.l2_regularizer(.2)
        self.assertIsNone(regularizers.sum_regularizer([]))
        self.assertIsNone(regularizers.sum_regularizer([None]))
        self.assertIsNone(
            regularizers.sum_regularizer(
                [regularizers.l1_regularizer(.0)])(None))

        values = np.array([-3.])
        weights = constant_op.constant(values)
        with session.Session() as sess:
            l1_reg1 = regularizers.sum_regularizer([l1_function])
            l1_result1 = sess.run(l1_reg1(weights))

            l1_reg2 = regularizers.sum_regularizer([l1_function, None])
            l1_result2 = sess.run(l1_reg2(weights))

            l1_reg3 = regularizers.sum_regularizer(
                [l1_function, regularizers.l2_regularizer(.0)])
            l1_result3 = sess.run(l1_reg3(weights))

            l1_l2_reg = regularizers.sum_regularizer(
                [l1_function, l2_function])
            l1_l2_result = sess.run(l1_l2_reg(weights))

        self.assertAllClose(.1 * np.abs(values).sum(), l1_result1)
        self.assertAllClose(.1 * np.abs(values).sum(), l1_result2)
        self.assertAllClose(.1 * np.abs(values).sum(), l1_result3)
        self.assertAllClose(
            .1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0,
            l1_l2_result)
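The expected values in these assertions can be checked by hand: for values = [-3.], the L1 term is .1 * |-3| = .3 and the (halved) L2 term is .2 * 9 / 2 = .9, so the combined regularizer returns 1.2. A quick NumPy check of the same arithmetic, independent of TensorFlow:

import numpy as np

values = np.array([-3.])
l1 = 0.1 * np.abs(values).sum()           # 0.3
l2 = 0.2 * np.square(values).sum() / 2.0  # 0.9 -- contrib's l2 halves the sum
print(l1, l1 + l2)                        # 0.3 1.2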
Example #4
  def __init__(self):
    super(RegularizedNetwork, self).__init__()
    self.l1 = self.track_layer(core.Dense(
        1,
        bias_regularizer=regularizers.l1_regularizer(2.0),
        kernel_regularizer=regularizers.l1_regularizer(2.0)))
    self.l2 = self.track_layer(core.Dense(
        1,
        bias_regularizer=regularizers.l1_regularizer(2.0)))
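In this eager-execution style there is no graph collection to read the penalties back from; each tracked layer exposes its regularization terms through its losses property once it has been built. A minimal sketch of collecting them, assuming the tfe.Network setup this example subclasses:

net = RegularizedNetwork()
net.l1(tf.zeros([1, 1]))  # building a layer creates its penalty tensors
net.l2(tf.zeros([1, 1]))
penalties = net.l1.losses + net.l2.losses  # per-layer regularization terms
total_penalty = tf.add_n(penalties)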
Example #5
def slim_net_original(image, keep_prob):
    with arg_scope([layers.conv2d, layers.fully_connected], biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(image, 32, [5, 5], scope='conv1', weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(net, 64, [5, 5], scope='conv2', weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
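The conv layers above attach their L1/L2 penalties to the graph rather than to net: contrib layers register them in tf.GraphKeys.REGULARIZATION_LOSSES, and a training script has to fold them into the objective itself. A minimal sketch, assuming TF 1.x and an MNIST-shaped input (the task loss is left as a placeholder comment):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
logits = slim_net_original(images, keep_prob=1.0)

# Sum every penalty registered in tf.GraphKeys.REGULARIZATION_LOSSES.
reg_loss = tf.losses.get_regularization_loss()
# total_loss = task_loss + reg_loss  # task_loss: e.g. cross-entropy on logits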
Example #6
    def test_xavier_wrong_dtype(self):
        with self.assertRaisesRegexp(
                TypeError,
                'Cannot create initializer for non-floating point type.'):
            initializers.xavier_initializer(dtype=dtypes.int32)

        self.assertIsNone(regularizers.l1_regularizer(0.)(None))
Example #7
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer)
Example #8
    def __init__(self, config, n_node, sess):
        self.batch_size = config.batch_size  # batch size
        self.feat_in = config.feat_in  # number of input features
        self.feat_out = config.feat_out  # number of output features
        self.num_nodes = config.num_nodes  # each sample has num_nodes nodes
        # Needs the graph Laplacian and lmax to be provided
        self.lmax = config.lmax
        self.sess = sess
        if config.activation == "tanh":
            self.activation = tf.tanh
        else:
            self.activation = tf.nn.relu
        self.max_grad_norm = config.max_grad_norm
        self.num_hidden = config.num_hidden  # RNN hidden layer size
        self.num_kernel = config.num_kernel  # Chebyshev order K
        self.learning_rate = config.learning_rate
        self.n_time_interval = config.n_time_interval
        self.n_steps = config.n_steps  # number of steps
        self.n_hidden_dense1 = config.n_hidden_dense1
        self.n_hidden_dense2 = config.n_hidden_dense2
        self.scale1 = config.l1
        self.scale2 = config.l2
        self.scale = config.l1l2
        self.n_nodes = n_node
        self.initializer = tf.random_normal_initializer(stddev=config.stddev)
        self.initializer2 = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
        self.regularizer = regularizers.l1_l2_regularizer(self.scale1, self.scale2)
        self.regularizer_1 = regularizers.l1_regularizer(self.scale1)
        self.regularizer_2 = regularizers.l2_regularizer(self.scale2)
        self.model_step = tf.Variable(0, name='model_step', trainable=False)
        self._build_placeholders()
        self._build_var()
        self.pred = self._build_model()
        truth = self.y  # [32,1]

        # Define loss and optimizer
        cost = tf.reduce_mean(tf.pow(self.pred - truth, 2)) + self.scale * tf.add_n(
            [self.regularizer(var) for var in tf.trainable_variables()])

        error = tf.reduce_mean(tf.pow(self.pred - truth, 2))
        tf.summary.scalar("error", error)

        var_list = tf.trainable_variables()

        opt1 = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        grads = tf.gradients(cost, var_list)
        grads_c = [tf.clip_by_norm(grad, self.max_grad_norm) for grad in grads]  # prevent exploding gradients

        train_op = opt1.apply_gradients(zip(grads_c, var_list), global_step=self.model_step, name='train_op')

        self.loss = cost
        self.error = error

        self.train_op = train_op
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
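A note on the cost above: contrib's l1_l2_regularizer(scale1, scale2) applied to a variable x returns scale1 * sum(|x|) + scale2 * sum(x**2) / 2, and the tf.add_n sums that penalty over every trainable variable before it is scaled once more by self.scale. A small NumPy sketch of the per-variable term (l1_l2_penalty is an illustrative name):

import numpy as np

def l1_l2_penalty(x, scale1, scale2):
    # mirrors contrib's l1_l2_regularizer: L1 term plus halved L2 term
    return scale1 * np.abs(x).sum() + scale2 * np.square(x).sum() / 2.0

print(l1_l2_penalty(np.array([-3.0, 1.0]), 0.1, 0.2))  # 0.4 + 1.0 = 1.4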
Example #9
    def initialise_network(self):
        '''
        Initialises the network and specifies the ordering of elements.
        :return:
        '''
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(
            'niftynet.contrib.csv_reader.toynet_features.ToyNetFeat')(
                num_classes=self.segmentation_param.num_classes,
                w_initializer=InitializerFactory.get_initializer(
                    name=self.net_param.weight_initializer),
                b_initializer=InitializerFactory.get_initializer(
                    name=self.net_param.bias_initializer),
                w_regularizer=w_regularizer,
                b_regularizer=b_regularizer,
                acti_func=self.net_param.activation_function)
        self.net_multi = ApplicationNetFactory.create(
            'niftynet.contrib.csv_reader.class_seg_finnet.ClassSegFinnet')(
                num_classes=self.segmentation_param.num_classes,
                w_initializer=InitializerFactory.get_initializer(
                    name=self.net_param.weight_initializer),
                b_initializer=InitializerFactory.get_initializer(
                    name=self.net_param.bias_initializer),
                w_regularizer=w_regularizer,
                b_regularizer=b_regularizer,
                acti_func=self.net_param.activation_function)
Example #10
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            num_classes=self.classification_param.num_classes,
            w_initializer=InitializerFactory.get_initializer(
                name=self.net_param.weight_initializer),
            b_initializer=InitializerFactory.get_initializer(
                name=self.net_param.bias_initializer),
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net_param.activation_function)
Example #11
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create('toynet')(
            num_classes=self.multioutput_param.num_classes,
            w_initializer=InitializerFactory.get_initializer(
                name=self.net_param.weight_initializer),
            b_initializer=InitializerFactory.get_initializer(
                name=self.net_param.bias_initializer),
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net_param.activation_function)
Example #12
    def test_l1(self):
        with self.assertRaises(ValueError):
            regularizers.l1_regularizer(-1.)
        with self.assertRaises(ValueError):
            regularizers.l1_regularizer(0)

        self.assertIsNone(regularizers.l1_regularizer(0.)(None))

        values = np.array([1., -1., 4., 2.])
        weights = constant_op.constant(values)
        with session.Session() as sess:
            result = sess.run(regularizers.l1_regularizer(.5)(weights))

        self.assertAllClose(np.abs(values).sum() * .5, result)
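The first two assertions hinge on contrib's argument checking: a negative scale is rejected, and so is the integer 0, since integer scales raise a ValueError outright, whereas the float 0. is accepted and yields a regularizer that simply returns None, disabling the penalty. Schematically:

regularizers.l1_regularizer(-1.)    # raises ValueError (negative scale)
regularizers.l1_regularizer(0)      # raises ValueError (integer scale)
regularizers.l1_regularizer(0.)(w)  # returns None -- zero scale, no penalty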
Example #13
  def test_l1(self):
    with self.assertRaises(ValueError):
      regularizers.l1_regularizer(-1.)
    with self.assertRaises(ValueError):
      regularizers.l1_regularizer(0)

    self.assertIsNone(regularizers.l1_regularizer(0.)(None))

    values = np.array([1., -1., 4., 2.])
    weights = constant_op.constant(values)
    with session.Session() as sess:
      result = sess.run(regularizers.l1_regularizer(.5)(weights))

    self.assertAllClose(np.abs(values).sum() * .5, result)
Example #14
def slim_net_original(image, keep_prob):
    with arg_scope(
        [layers.conv2d, layers.fully_connected],
            biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(
            image,
            32, [5, 5],
            scope='conv1',
            weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(
            net,
            64, [5, 5],
            scope='conv2',
            weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
Example #15
  def test_sum_regularizer(self):
    l1_function = regularizers.l1_regularizer(.1)
    l2_function = regularizers.l2_regularizer(.2)
    self.assertIsNone(regularizers.sum_regularizer([]))
    self.assertIsNone(regularizers.sum_regularizer([None]))

    values = np.array([-3.])
    weights = constant_op.constant(values)
    with session.Session() as sess:
      l1_reg1 = regularizers.sum_regularizer([l1_function])
      l1_result1 = sess.run(l1_reg1(weights))

      l1_reg2 = regularizers.sum_regularizer([l1_function, None])
      l1_result2 = sess.run(l1_reg2(weights))

      l1_l2_reg = regularizers.sum_regularizer([l1_function, l2_function])
      l1_l2_result = sess.run(l1_l2_reg(weights))

    self.assertAllClose(.1 * np.abs(values).sum(), l1_result1)
    self.assertAllClose(.1 * np.abs(values).sum(), l1_result2)
    self.assertAllClose(
        .1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0,
        l1_l2_result)
Example #16
  def test_xavier_wrong_dtype(self):
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.xavier_initializer(dtype=dtypes.int32)

    self.assertIsNone(regularizers.l1_regularizer(0.)(None))