Example #1
    def identity_block(self, input, kernel_size, filters):
        filters1, filters2, filters3 = filters

        output = Conv2D(num_outputs=filters1,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[input])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters2,
                        kernel_size=kernel_size,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters3,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])

        output = Add(in_layers=[output, input])
        output = ReLU(output)

        return output
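The identity block keeps spatial size and channel count unchanged, so the final Add is only valid when filters3 equals the channel count of the block's input. A hedged usage sketch (x and the 256-channel assumption are illustrative; the ResNet50 constructor further down calls it the same way):

# x is assumed to be a 256-channel feature map from a preceding conv_block,
# so filters=[64, 64, 256] keeps the residual Add shape-compatible.
x = self.identity_block(x, kernel_size=3, filters=[64, 64, 256])
x = self.identity_block(x, kernel_size=3, filters=[64, 64, 256])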
Example #2
    def test_relu(self):
        """Test that ReLU can be invoked."""
        batch_size = 10
        n_features = 5
        in_tensor = np.random.rand(batch_size, n_features)
        with self.session() as sess:
            in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
            out_tensor = ReLU()(in_tensor)
            out_tensor = out_tensor.eval()
            assert out_tensor.shape == (batch_size, n_features)
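The test relies on the self.session() helper from DeepChem's test harness. A hedged standalone equivalent in plain TensorFlow 1.x graph mode (the deepchem.models.tensorgraph.layers import path for ReLU is an assumption):

import numpy as np
import tensorflow as tf
from deepchem.models.tensorgraph.layers import ReLU  # assumed import path

batch_size, n_features = 10, 5
in_tensor = tf.convert_to_tensor(
    np.random.rand(batch_size, n_features), dtype=tf.float32)
out_tensor = ReLU()(in_tensor)  # calling the layer builds the output tensor
with tf.Session() as sess:
    assert sess.run(out_tensor).shape == (batch_size, n_features)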
Example #3
def test_ReLU_pickle():
    tg = TensorGraph()
    feature = Feature(shape=(tg.batch_size, 1))
    layer = ReLU(in_layers=feature)
    tg.add_output(layer)
    tg.set_loss(layer)
    tg.build()
    tg.save()
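The test only checks that the graph builds and serializes; restoring it is not exercised. If this DeepChem version exposes TensorGraph.load_from_dir (an assumption here), a round trip might look like:

# Hedged sketch: load_from_dir and the model_dir attribute are assumed to exist on TensorGraph.
restored = TensorGraph.load_from_dir(tg.model_dir)
restored.build()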
Example #4
    def _build(self):
        self.A_tilda_k = list()
        for k in range(1, self.k_max + 1):
            self.A_tilda_k.append(
                Feature(name="graph_adjacency_{}".format(k),
                        dtype=tf.float32,
                        shape=[None, self.max_nodes, self.max_nodes]))
        self.X = Feature(name='atom_features',
                         dtype=tf.float32,
                         shape=[None, self.max_nodes, self.num_node_features])

        graph_layers = list()
        adaptive_filters = list()

        for index, k in enumerate(range(1, self.k_max + 1)):

            in_layers = [self.A_tilda_k[index], self.X]

            adaptive_filters.append(
                AdaptiveFilter(batch_size=self.batch_size,
                               in_layers=in_layers,
                               num_nodes=self.max_nodes,
                               num_node_features=self.num_node_features,
                               combine_method=self.combine_method))

            graph_layers.append(
                KOrderGraphConv(batch_size=self.batch_size,
                                in_layers=in_layers +
                                [adaptive_filters[index]],
                                num_nodes=self.max_nodes,
                                num_node_features=self.num_node_features,
                                init='glorot_uniform'))

        graph_features = Concat(in_layers=graph_layers, axis=2)
        graph_features = ReLU(in_layers=[graph_features])
        flattened = Flatten(in_layers=[graph_features])

        dense1 = Dense(in_layers=[flattened],
                       out_channels=64,
                       activation_fn=tf.nn.relu)
        dense2 = Dense(in_layers=[dense1],
                       out_channels=16,
                       activation_fn=tf.nn.relu)
        dense3 = Dense(in_layers=[dense2],
                       out_channels=1 * self.n_tasks,
                       activation_fn=None)
        output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        weights = Weights(shape=(None, self.n_tasks))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
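The graph_adjacency_{k} placeholders suggest the feed contains successive powers of a (normalized) adjacency matrix, one per hop order up to k_max. A hedged numpy sketch of preparing those inputs for a single graph; the normalization and the padding to max_nodes depend on the featurizer and are not shown:

import numpy as np

def adjacency_powers(A_tilde, k_max):
    # A_tilde: (n_nodes, n_nodes) normalized adjacency (assumption); returns
    # [A_tilde^1, ..., A_tilde^k_max] to feed the graph_adjacency_k Features.
    powers, A_k = [], np.eye(A_tilde.shape[0])
    for _ in range(k_max):
        A_k = A_k @ A_tilde
        powers.append(A_k)
    return powers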
Example #5
    def __init__(self,
                 img_rows=224,
                 img_cols=224,
                 weights="imagenet",
                 classes=1000,
                 **kwargs):
        super(ResNet50, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.weights = weights
        self.classes = classes

        input = Feature(shape=(None, self.img_rows, self.img_cols, 3))
        labels = Label(shape=(None, self.classes))

        conv1 = Conv2D(num_outputs=64,
                       kernel_size=7,
                       stride=2,
                       activation='linear',
                       padding='same',
                       in_layers=[input])
        bn1 = BatchNorm(in_layers=[conv1])
        ac1 = ReLU(bn1)
        pool1 = MaxPool2D(ksize=[1, 3, 3, 1], in_layers=[ac1])

        cb1 = self.conv_block(pool1, 3, [64, 64, 256], 1)
        id1 = self.identity_block(cb1, 3, [64, 64, 256])
        id1 = self.identity_block(id1, 3, [64, 64, 256])

        cb2 = self.conv_block(id1, 3, [128, 128, 512])
        id2 = self.identity_block(cb2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])

        cb3 = self.conv_block(id2, 3, [256, 256, 1024])
        id3 = self.identity_block(cb3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])

        cb4 = self.conv_block(id3, 3, [512, 512, 2048])
        id4 = self.identity_block(cb4, 3, [512, 512, 2048])
        id4 = self.identity_block(id4, 3, [512, 512, 2048])

        pool2 = AvgPool2D(ksize=[1, 7, 7, 1], in_layers=[id4])

        flatten = Flatten(in_layers=[pool2])
        dense = Dense(classes, in_layers=[flatten])

        loss = SoftMaxCrossEntropy(in_layers=[labels, dense])
        loss = ReduceMean(in_layers=[loss])
        self.set_loss(loss)
        self.add_output(dense)
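After the first pooled convolution, the four stages follow the standard ResNet-50 layout of 3/4/6/3 bottleneck blocks: one conv_block with a projection shortcut, then 2/3/5/2 identity blocks chained from its output. The repeated calls above could equivalently be written with a small helper (illustrative only; stage is not part of the original class):

def stage(self, x, filters, n_identity, strides=2):
    # One ResNet stage: a strided conv_block followed by n_identity identity blocks.
    x = self.conv_block(x, 3, filters, strides)
    for _ in range(n_identity):
        x = self.identity_block(x, 3, filters)
    return x

# e.g. the [256, 256, 1024] stage above: id3 = self.stage(id2, [256, 256, 1024], n_identity=5)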
Example #6
    def conv_block(self, input, kernel_size, filters, strides=2):
        filters1, filters2, filters3 = filters

        output = Conv2D(num_outputs=filters1,
                        kernel_size=1,
                        stride=strides,
                        activation='linear',
                        padding='same',
                        in_layers=[input])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters2,
                        kernel_size=kernel_size,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters3,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])

        shortcut = Conv2D(num_outputs=filters3,
                          kernel_size=1,
                          stride=strides,
                          activation='linear',
                          padding='same',
                          in_layers=[input])
        shortcut = BatchNorm(in_layers=[shortcut])
        output = Add(in_layers=[shortcut, output])
        output = ReLU(output)

        return output
Example #7
    def build_graph(self):
        # inputs placeholder
        self.inputs = Feature(shape=(None, self.image_size, self.image_size,
                                     3),
                              dtype=tf.float32)
        # data preprocessing and augmentation
        in_layer = DRAugment(self.augment,
                             self.batch_size,
                             size=(self.image_size, self.image_size),
                             in_layers=[self.inputs])
        # first conv layer
        in_layer = Conv2D(int(self.n_init_kernel),
                          kernel_size=7,
                          activation_fn=None,
                          in_layers=[in_layer])
        in_layer = BatchNorm(in_layers=[in_layer])
        in_layer = ReLU(in_layers=[in_layer])

        # downsample by max pooling
        res_in = MaxPool2D(ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           in_layers=[in_layer])

        for ct_module in range(self.n_downsample - 1):
            # each module is a residual convolutional block
            # followed by a convolutional downsample layer
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[res_in])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=3,
                              activation_fn=None,
                              in_layers=[in_layer])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            in_layer = Conv2D(int(self.n_init_kernel * 2**ct_module),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[in_layer])
            res_a = BatchNorm(in_layers=[in_layer])

            res_out = res_in + res_a
            res_in = Conv2D(int(self.n_init_kernel * 2**(ct_module + 1)),
                            kernel_size=3,
                            stride=2,
                            in_layers=[res_out])
            res_in = BatchNorm(in_layers=[res_in])

        # max pooling over the final outcome
        in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])

        for layer_size in self.n_fully_connected:
            # fully connected layers
            in_layer = Dense(layer_size,
                             activation_fn=tf.nn.relu,
                             in_layers=[in_layer])
            # dropout for dense layers
            #in_layer = Dropout(0.25, in_layers=[in_layer])

        logit_pred = Dense(self.n_tasks * self.n_classes,
                           activation_fn=None,
                           in_layers=[in_layer])
        logit_pred = Reshape(shape=(None, self.n_tasks, self.n_classes),
                             in_layers=[logit_pred])

        weights = Weights(shape=(None, self.n_tasks))
        labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)

        output = SoftMax(logit_pred)
        self.add_output(output)
        loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
        weighted_loss = WeightedError(in_layers=[loss, weights])

        # weight decay regularizer
        # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
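The head reshapes the n_tasks * n_classes logits to (batch, n_tasks, n_classes) before the softmax, so each task gets its own class distribution. A small numpy illustration of that reshape-then-softmax step (the batch/task/class sizes are made up):

import numpy as np

batch, n_tasks, n_classes = 4, 2, 5
logits = np.random.randn(batch, n_tasks * n_classes).reshape(batch, n_tasks, n_classes)
probs = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)  # softmax per task
assert np.allclose(probs.sum(axis=-1), 1.0)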
Example #8
    def __init__(self,
                 n_tasks,
                 n_features,
                 alpha_init_stddevs=0.02,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 **kwargs):
        """Creates a progressive network.
  
    Only listing parameters specific to progressive networks here.

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: int
      Number of input features
    alpha_init_stddevs: list
      List of standard-deviations for alpha in adapter layers.
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    """

        super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        self.layer_sizes = layer_sizes
        self.alpha_init_stddevs = alpha_init_stddevs
        self.weight_init_stddevs = weight_init_stddevs
        self.bias_init_consts = bias_init_consts
        self.dropouts = dropouts
        self.activation_fns = activation_fns

        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            self.weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(alpha_init_stddevs, collections.Sequence):
            self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            self.bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            self.dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            self.activation_fns = [activation_fns] * n_layers

        # Add the input features.
        self.mol_features = Feature(shape=(None, n_features))

        all_layers = {}
        outputs = []
        for task in range(self.n_tasks):
            task_layers = []
            for i in range(n_layers):
                if i == 0:
                    prev_layer = self.mol_features
                else:
                    prev_layer = all_layers[(i - 1, task)]
                    if task > 0:
                        lateral_contrib, trainables = self.add_adapter(
                            all_layers, task, i)
                        task_layers.extend(trainables)

                layer = Dense(in_layers=[prev_layer],
                              out_channels=layer_sizes[i],
                              activation_fn=None,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=self.weight_init_stddevs[i]),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer,
                                  value=self.bias_init_consts[i]))
                task_layers.append(layer)

                if i > 0 and task > 0:
                    layer = layer + lateral_contrib
                assert self.activation_fns[i] is tf.nn.relu, "Only ReLU is supported"
                layer = ReLU(in_layers=[layer])
                if self.dropouts[i] > 0.0:
                    layer = Dropout(self.dropouts[i], in_layers=[layer])
                all_layers[(i, task)] = layer

            prev_layer = all_layers[(n_layers - 1, task)]
            layer = Dense(in_layers=[prev_layer],
                          out_channels=1,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=self.weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(
                              tf.constant_initializer,
                              value=self.bias_init_consts[-1]))
            task_layers.append(layer)

            if task > 0:
                lateral_contrib, trainables = self.add_adapter(
                    all_layers, task, n_layers)
                task_layers.extend(trainables)
                layer = layer + lateral_contrib
            outputs.append(layer)
            self.add_output(layer)
            task_label = Label(shape=(None, 1))
            task_weight = Weights(shape=(None, 1))
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[task_label, layer, task_weight]))
            self.create_submodel(layers=task_layers,
                                 loss=weighted_loss,
                                 optimizer=None)
        # Weight decay not activated
        """